whitney-house commited on
Commit
e5cbf19
·
0 Parent(s):
backend/rag_system.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Recipe RAG System - A retrieval-augmented generation system for recipes
3
+ Author: [Your Name]
4
+ Date: February 2025
5
+
6
+ This system combines vector search with language model generation to provide
7
+ contextually relevant recipe information based on user queries.
8
+ """
9
+
10
+ from llama_index.core import VectorStoreIndex, Document, Settings
11
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
12
+ from datasets import load_dataset
13
+ from transformers import pipeline
14
+ import torch
15
+ import logging
16
+ from typing import List, Dict, Any, Optional
17
+ import os
18
+ from dotenv import load_dotenv
19
+
20
+ # Configure logging
21
+ logging.basicConfig(
22
+ level=logging.INFO,
23
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
24
+ )
25
+ logger = logging.getLogger(__name__)
26
+
27
+ # Load environment variables
28
+ load_dotenv()
29
+
30
class RecipeRAGSystem:
    """
    A Retrieval-Augmented Generation system specialized for recipe queries.

    This class handles both the retrieval of relevant recipe documents from a
    vector store and the generation of helpful responses using a language model.
    """

    def __init__(
        self,
        embedding_model: str = "BAAI/bge-small-en-v1.5",
        llm_model: str = "microsoft/Phi-3-mini-4k-instruct",
        dataset_name: str = "m3hrdadfi/recipe_nlg_lite",
        temperature: float = 0.7,
        top_k: int = 3
    ):
        """
        Initialize the RAG system with specified models and parameters.

        Args:
            embedding_model: Name of the HuggingFace embedding model.
            llm_model: Name of the language model for generation.
            dataset_name: Name of the HuggingFace dataset containing recipes.
            temperature: Sampling temperature for text generation.
            top_k: Default number of documents to retrieve.
        """
        logger.info("Initializing RecipeRAGSystem with %s and %s",
                    embedding_model, llm_model)

        # Embeddings are handled by LlamaIndex; generation is done by our own
        # transformers pipeline, so disable LlamaIndex's internal LLM.
        Settings.llm = None
        Settings.embed_model = HuggingFaceEmbedding(model_name=embedding_model)

        # Text-generation pipeline (GPU via device_map="auto" when available,
        # otherwise CPU).
        self.generator = pipeline(
            "text-generation",
            model=llm_model,
            torch_dtype=torch.float32,
            device_map="auto" if torch.cuda.is_available() else "cpu",
            do_sample=True,
            temperature=temperature
        )

        self.default_top_k = top_k
        self.dataset_name = dataset_name
        # BUGFIX: remember the configured temperature so generate_answer()
        # honors it — it previously hard-coded 0.7, silently ignoring this
        # constructor argument.
        self.temperature = temperature
        self.index = None

        # Build the index eagerly so the first query does not pay the cost.
        self._build_index()

    def _build_index(self) -> None:
        """
        Build the vector index from the recipe dataset.

        Loads the configured HuggingFace dataset and converts each entry into
        a LlamaIndex Document before indexing.

        Raises:
            Exception: re-raised (after logging) if loading or indexing fails.
        """
        logger.info("Building index from dataset: %s", self.dataset_name)
        try:
            dataset = load_dataset(self.dataset_name, trust_remote_code=True)

            # One structured document per recipe; the recipe name is kept as
            # metadata so callers can identify sources without parsing text.
            documents = [
                Document(
                    text=(
                        f"Recipe: {item['name']}\n"
                        f"Ingredients: {item['ingredients']}\n"
                        f"Steps: {item['steps']}"
                    ),
                    metadata={"name": item["name"]},
                )
                for item in dataset["train"]
            ]

            logger.info("Created %d recipe documents", len(documents))
            self.index = VectorStoreIndex.from_documents(documents)

        except Exception as e:
            logger.error("Error building index: %s", e)
            raise

    def retrieve(self, query: str, top_k: Optional[int] = None) -> Dict[str, Any]:
        """
        Retrieve relevant recipe documents for a user query.

        Args:
            query: The user's recipe-related query.
            top_k: Number of documents to retrieve (overrides the default).

        Returns:
            Dict with keys "result" (raw query result), "sources" (retrieved
            document texts) and "metadata" (per-document metadata).
        """
        # BUGFIX: explicit None check — `top_k or default` would silently
        # replace an explicit top_k=0 with the default.
        k = self.default_top_k if top_k is None else top_k
        logger.info("Retrieving top %d documents for query: '%s'", k, query)

        try:
            query_engine = self.index.as_query_engine(similarity_top_k=k)
            result = query_engine.query(query)

            # Surface retrieved node texts and metadata alongside the raw
            # result object so callers don't need LlamaIndex internals.
            sources = [n.node.get_content() for n in result.source_nodes]
            metadata = [n.node.metadata for n in result.source_nodes]

            return {
                "result": result,
                "sources": sources,
                "metadata": metadata
            }
        except Exception as e:
            logger.error("Retrieval error: %s", e)
            raise

    def generate_answer(self, question: str, context: List[str]) -> str:
        """
        Generate a helpful response based on the retrieved recipe context.

        Args:
            question: The user's original question.
            context: Retrieved recipe documents used as grounding context.

        Returns:
            The generated response text (prompt echo stripped).
        """
        logger.info("Generating answer for: '%s'", question)

        try:
            # BUGFIX: separate recipes with blank lines — "".join() ran them
            # together, blurring recipe boundaries inside the prompt.
            recipes = "\n\n".join(context)

            # NOTE(review): this is a Llama-2 style [INST]/<<SYS>> template;
            # Phi-3 uses a different chat format — confirm the model responds
            # acceptably, or switch to the tokenizer's chat template.
            prompt = (
                "\n[INST] <<SYS>>\n"
                "You are a professional recipe assistant. "
                "Answer the user's question based on the following recipes:\n\n"
                f"{recipes}\n"
                "<</SYS>>\n\n"
                f"Question: {question} [/INST]\n"
            )

            result = self.generator(
                prompt,
                max_new_tokens=256,
                # Honor the temperature configured at construction time.
                temperature=self.temperature
            )[0]['generated_text']

            # The pipeline echoes the prompt; keep only the completion.
            return result.split("[/INST]")[-1].strip()

        except Exception as e:
            logger.error("Generation error: %s", e)
            raise
backend/requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi>=0.68.0
2
+ uvicorn>=0.15.0
3
+ python-dotenv>=0.19.0
4
+ datasets>=2.0.0
5
+ transformers>=4.30.0
6
+ torch>=2.0.0
7
+ llama-index-core>=0.10.0
8
+ llama-index-embeddings-huggingface>=0.1.0
9
+ pytest>=7.4.0
10
+ httpx>=0.24.0
backend/server.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException, Depends, Request
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+ from fastapi.responses import JSONResponse
4
+ from pydantic import BaseModel, Field
5
+ from typing import List, Dict, Any, Optional
6
+ import logging
7
+ import time
8
+ import os
9
+ from dotenv import load_dotenv
10
+
11
+ # Import the RAG system
12
+ from rag_system import RecipeRAGSystem
13
+
14
+ # Load environment variables
15
+ load_dotenv()
16
+
17
+ # Configure logging
18
+ logging.basicConfig(
19
+ level=logging.INFO,
20
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
21
+ )
22
+ logger = logging.getLogger(__name__)
23
+
24
+ # Initialize FastAPI app
25
+ app = FastAPI(
26
+ title="Recipe Assistant API",
27
+ description="An API for a Recipe RAG (Retrieval-Augmented Generation) system",
28
+ version="1.0.0"
29
+ )
30
+
31
+ # CORS middleware
32
+ app.add_middleware(
33
+ CORSMiddleware,
34
+ allow_origins=os.getenv("ALLOWED_ORIGINS", "*").split(","),
35
+ allow_methods=["*"],
36
+ allow_headers=["*"],
37
+ )
38
+
39
+ # Request models
40
class ChatRequest(BaseModel):
    """Request body for POST /api/chat."""
    message: str = Field(..., description="User's recipe-related question")
    top_k: Optional[int] = Field(3, description="Number of recipes to retrieve")
43
+
44
class FeedbackRequest(BaseModel):
    """Request body for POST /api/feedback."""
    query_id: str = Field(..., description="Unique ID of the query")
    rating: int = Field(..., ge=1, le=5, description="Rating (1-5)")
    feedback: Optional[str] = Field(None, description="Optional feedback text")
48
+
49
+ # Response models
50
class ChatResponse(BaseModel):
    """Response body returned by POST /api/chat."""
    answer: str = Field(..., description="Generated answer to the user's question")
    sources: List[str] = Field(..., description="Recipe sources used for the answer")
    query_id: str = Field(..., description="Unique ID for this query")
    response_time: float = Field(..., description="Processing time in seconds")
55
+
56
+ # Request ID middleware
57
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
    """Attach an X-Process-Time header recording wall-clock handling time."""
    started = time.time()
    response = await call_next(request)
    response.headers["X-Process-Time"] = str(time.time() - started)
    return response
64
+
65
+ # Global error handler
66
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Catch-all handler: log the failure and return a generic 500 payload."""
    logger.error(f"Global error: {str(exc)}")
    payload = {
        "detail": "An unexpected error occurred",
        "type": type(exc).__name__,
    }
    return JSONResponse(status_code=500, content=payload)
73
+
74
+ # Initialize the RAG system (with lazy loading)
75
# Module-level singleton holder for the lazily-constructed RAG system.
_rag_system = None

def get_rag_system():
    """
    Return the shared RecipeRAGSystem, constructing it on first use.

    Lazy construction keeps API startup fast and defers loading the heavy
    models until the first request actually needs them.
    """
    global _rag_system
    if _rag_system is not None:
        return _rag_system

    logger.info("Initializing RAG system")
    _rag_system = RecipeRAGSystem(
        embedding_model=os.getenv("EMBEDDING_MODEL", "BAAI/bge-small-en-v1.5"),
        llm_model=os.getenv("LLM_MODEL", "microsoft/Phi-3-mini-4k-instruct"),
        temperature=float(os.getenv("TEMPERATURE", "0.7")),
    )
    return _rag_system
91
+
92
+ # API endpoints
93
@app.post("/api/chat", response_model=ChatResponse)
async def chat_endpoint(request: ChatRequest, rag_system: RecipeRAGSystem = Depends(get_rag_system)):
    """
    Process a user query about recipes and return a generated response with sources.

    Returns a ChatResponse-shaped dict: answer text, the recipe sources used,
    a unique query ID, and the elapsed processing time in seconds.

    Raises:
        HTTPException: 500 with the underlying error message on any failure.
    """
    import uuid  # local import: only this endpoint needs it

    start_time = time.time()
    # BUGFIX: the old f"query-{int(start_time)}" collides for concurrent
    # requests within the same second; a UUID suffix makes the ID unique
    # as the API documents it to be.
    query_id = f"query-{int(start_time)}-{uuid.uuid4().hex[:8]}"
    logger.info("Processing chat request %s: %s", query_id, request.message)

    try:
        # NOTE(review): retrieve()/generate_answer() are synchronous and will
        # block the event loop while the model runs — consider
        # fastapi.concurrency.run_in_threadpool; confirm acceptable load.
        retrieval_result = rag_system.retrieve(request.message, request.top_k)
        sources = retrieval_result["sources"]

        # Generate an answer grounded in the retrieved recipes.
        answer = rag_system.generate_answer(request.message, sources)

        return {
            "answer": answer,
            "sources": sources,
            "query_id": query_id,
            "response_time": time.time() - start_time,
        }
    except Exception as e:
        logger.error("Error in chat endpoint: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
122
+
123
@app.post("/api/feedback", status_code=201)
async def feedback_endpoint(request: FeedbackRequest):
    """
    Collect user feedback about the quality of responses.

    Persistence is intentionally out of scope here; a production deployment
    would write this record to a database.
    """
    logger.info(f"Received feedback for query {request.query_id}: rating={request.rating}")
    return {"status": "Feedback received", "query_id": request.query_id}
132
+
133
@app.get("/health")
async def health_check():
    """Liveness probe for monitoring systems and load balancers."""
    return {"status": "healthy", "timestamp": time.time()}
139
@app.get("/")
async def root():
    """Describe the service and enumerate its available endpoints."""
    endpoints = {
        "/api/chat": "POST - Submit recipe questions",
        "/api/feedback": "POST - Submit feedback",
        "/health": "GET - Service health check",
        "/docs": "API documentation",
    }
    return {
        "message": "Welcome to Recipe Assistant API",
        "endpoints": endpoints,
    }
150
+
151
+
152
if __name__ == "__main__":
    import uvicorn

    # Runtime configuration comes from the environment, with safe defaults.
    port = int(os.getenv("PORT", "8000"))
    log_level = os.getenv("LOG_LEVEL", "info")
    dev_mode = os.getenv("ENVIRONMENT", "production") == "development"

    logger.info(f"Starting API server on port {port}")
    uvicorn.run(
        "server:app",
        host="0.0.0.0",
        port=port,
        log_level=log_level,
        # Auto-reload only in development.
        reload=dev_mode,
    )
fronted/.gitignore ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Logs
2
+ logs
3
+ *.log
4
+ npm-debug.log*
5
+ yarn-debug.log*
6
+ yarn-error.log*
7
+ pnpm-debug.log*
8
+ lerna-debug.log*
9
+
10
+ node_modules
11
+ dist
12
+ dist-ssr
13
+ *.local
14
+
15
+ # Editor directories and files
16
+ .vscode/*
17
+ !.vscode/extensions.json
18
+ .idea
19
+ .DS_Store
20
+ *.suo
21
+ *.ntvs*
22
+ *.njsproj
23
+ *.sln
24
+ *.sw?
fronted/README.md ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # React + Vite
2
+
3
+ This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
4
+
5
+ Currently, two official plugins are available:
6
+
7
+ - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
8
+ - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
fronted/eslint.config.js ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import js from '@eslint/js'
import globals from 'globals'
import react from 'eslint-plugin-react'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'

// Recommended presets plus project-specific tweaks, kept in one place.
const rules = {
  ...js.configs.recommended.rules,
  ...react.configs.recommended.rules,
  ...react.configs['jsx-runtime'].rules,
  ...reactHooks.configs.recommended.rules,
  'react/jsx-no-target-blank': 'off',
  'react-refresh/only-export-components': [
    'warn',
    { allowConstantExport: true },
  ],
}

export default [
  // Never lint build output.
  { ignores: ['dist'] },
  {
    files: ['**/*.{js,jsx}'],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
      parserOptions: {
        ecmaVersion: 'latest',
        ecmaFeatures: { jsx: true },
        sourceType: 'module',
      },
    },
    settings: { react: { version: '18.3' } },
    plugins: {
      react,
      'react-hooks': reactHooks,
      'react-refresh': reactRefresh,
    },
    rules,
  },
]
fronted/index.html ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <!-- FIX: replace the leftover Vite template title with the app's name -->
    <title>Recipe Assistant</title>
  </head>
  <body>
    <div id="root"></div>
    <script type="module" src="/src/main.jsx"></script>
  </body>
</html>
fronted/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
fronted/package.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "fronted",
3
+ "private": true,
4
+ "version": "0.0.0",
5
+ "type": "module",
6
+ "scripts": {
7
+ "dev": "vite",
8
+ "build": "vite build",
9
+ "lint": "eslint .",
10
+ "preview": "vite preview"
11
+ },
12
+ "dependencies": {
13
+ "@emotion/react": "^11.14.0",
14
+ "@emotion/styled": "^11.14.0",
15
+ "axios": "^1.7.9",
16
+ "react": "^19.0.0",
17
+ "react-dom": "^19.0.0",
18
+ "react-markdown": "^10.0.0"
19
+ },
20
+ "devDependencies": {
21
+ "@eslint/js": "^9.19.0",
22
+ "@types/react": "^19.0.8",
23
+ "@types/react-dom": "^19.0.3",
24
+ "@vitejs/plugin-react": "^4.3.4",
25
+ "eslint": "^9.19.0",
26
+ "eslint-plugin-react": "^7.37.4",
27
+ "eslint-plugin-react-hooks": "^5.0.0",
28
+ "eslint-plugin-react-refresh": "^0.4.18",
29
+ "globals": "^15.14.0",
30
+ "vite": "^6.1.0"
31
+ }
32
+ }
fronted/public/vite.svg ADDED
fronted/src/App.css ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Base styles */
2
+ body {
3
+ font-family: Arial, sans-serif;
4
+ background-color: #f0f2f5;
5
+ margin: 0;
6
+ padding: 0;
7
+ }
8
+
9
+ .app {
10
+ max-width: 800px;
11
+ margin: 0 auto;
12
+ padding: 20px;
13
+ }
14
+
15
+ h1 {
16
+ text-align: center;
17
+ color: #333;
18
+ }
19
+
20
+ /* Chat container */
21
+ .chat-container {
22
+ background: white;
23
+ border-radius: 10px;
24
+ box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
25
+ padding: 20px;
26
+ }
27
+
28
+ /* Message styles */
29
+ .messages {
30
+ max-height: 500px;
31
+ overflow-y: auto;
32
+ margin-bottom: 20px;
33
+ }
34
+
35
+ .message {
36
+ margin-bottom: 15px;
37
+ display: flex;
38
+ flex-direction: column;
39
+ }
40
+
41
+ .message.user {
42
+ align-items: flex-end;
43
+ }
44
+
45
+ .message.bot {
46
+ align-items: flex-start;
47
+ }
48
+
49
+ .message .content {
50
+ max-width: 70%;
51
+ padding: 10px 15px;
52
+ border-radius: 10px;
53
+ background-color: #e8e8e8;
54
+ }
55
+
56
+ .message.user .content {
57
+ background-color: #dcf8c6;
58
+ }
59
+
60
+ /* Loading indicator */
61
+ .loading {
62
+ text-align: center;
63
+ font-style: italic;
64
+ color: #666;
65
+ }
66
+
67
+ /* Input and button */
68
+ form {
69
+ display: flex;
70
+ gap: 10px;
71
+ }
72
+
73
+ input {
74
+ flex: 1;
75
+ padding: 10px;
76
+ border: 1px solid #ddd;
77
+ border-radius: 20px;
78
+ outline: none;
79
+ }
80
+
81
+ button {
82
+ padding: 10px 20px;
83
+ background-color: #075e54;
84
+ color: white;
85
+ border: none;
86
+ border-radius: 20px;
87
+ cursor: pointer;
88
+ }
89
+
90
+ button:disabled {
91
+ background-color: #ccc;
92
+ cursor: not-allowed;
93
+ }
94
+
95
+ /* Reference recipes */
96
+ .sources {
97
+ margin-top: 10px;
98
+ font-size: 0.9em;
99
+ color: #666;
100
+ }
101
+
102
+ .sources h4 {
103
+ margin: 0 0 5px 0;
104
+ font-size: 1em;
105
+ }
106
+
107
+ .source {
108
+ background: #f9f9f9;
109
+ padding: 5px 10px;
110
+ border-radius: 5px;
111
+ margin-bottom: 5px;
112
+ }
fronted/src/App.jsx ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React from 'react';
2
+ import Chat from './components/Chat'; // Ensure the path is correct
3
+ import './app.css'; // Ensure the component styles are imported
4
+
5
+ function App() {
6
+ return (
7
+ <div className="app">
8
+ <h1>Recipe Assistant</h1>
9
+ <Chat />
10
+ </div>
11
+ );
12
+ }
13
+
14
+ export default App;
fronted/src/assets/react.svg ADDED
fronted/src/components/Chat.jsx ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React, { useState } from 'react';
2
+ import ReactMarkdown from 'react-markdown';
3
+ import axios from 'axios';
4
+
5
+ const Chat = () => {
6
+ const [messages, setMessages] = useState([]);
7
+ const [input, setInput] = useState('');
8
+ const [loading, setLoading] = useState(false);
9
+
10
+ const handleSubmit = async (e) => {
11
+ e.preventDefault();
12
+ if (!input.trim()) return;
13
+
14
+ setLoading(true);
15
+ try {
16
+ // Add user message
17
+ setMessages(prev => [...prev, { role: 'user', content: input }]);
18
+
19
+ // Get AI response
20
+ const response = await axios.post('http://localhost:8000/api/chat', {
21
+ message: input
22
+ });
23
+
24
+ // Add AI response
25
+ setMessages(prev => [
26
+ ...prev,
27
+ {
28
+ role: 'bot',
29
+ content: response.data.answer,
30
+ sources: response.data.sources
31
+ }
32
+ ]);
33
+ } catch (error) {
34
+ console.error('Error:', error);
35
+ }
36
+ setLoading(false);
37
+ setInput('');
38
+ };
39
+
40
+ return (
41
+ <div className="chat-container">
42
+ <div className="messages">
43
+ {messages.map((msg, i) => (
44
+ <div key={i} className={`message ${msg.role}`}>
45
+ <div className="content">
46
+ <ReactMarkdown>{msg.content}</ReactMarkdown>
47
+ {msg.sources && (
48
+ <div className="sources">
49
+ <h4>Reference Recipes:</h4>
50
+ {msg.sources.map((source, idx) => (
51
+ <div key={idx} className="source">
52
+ <p>{source.substring(0, 150)}...</p>
53
+ </div>
54
+ ))}
55
+ </div>
56
+ )}
57
+ </div>
58
+ </div>
59
+ ))}
60
+ {loading && <div className="loading">Thinking...</div>}
61
+ </div>
62
+
63
+ <form onSubmit={handleSubmit}>
64
+ <input
65
+ value={input}
66
+ onChange={(e) => setInput(e.target.value)}
67
+ placeholder="Enter your recipe question..."
68
+ />
69
+ <button type="submit" disabled={loading}>
70
+ {loading ? 'Sending...' : 'Send'}
71
+ </button>
72
+ </form>
73
+ </div>
74
+ );
75
+ };
76
+
77
+ export default Chat;
fronted/src/index.css ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Global reset */
2
+ * {
3
+ margin: 0;
4
+ padding: 0;
5
+ box-sizing: border-box;
6
+ }
7
+
8
+ /* Root element styles */
9
+ #root {
10
+ min-height: 100vh;
11
+ display: flex;
12
+ flex-direction: column;
13
+ align-items: center;
14
+ justify-content: center;
15
+ }
fronted/src/main.jsx ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React from 'react';
2
+ import ReactDOM from 'react-dom/client';
3
+ import App from './App'; // Ensure the path is correct
4
+ import './index.css'; // Ensure the global styles are imported
5
+
6
+ const root = ReactDOM.createRoot(document.getElementById('root'));
7
+ root.render(
8
+ <React.StrictMode>
9
+ <App />
10
+ </React.StrictMode>
11
+ );
fronted/vite.config.js ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'

// https://vite.dev/config/
const plugins = [react()]

export default defineConfig({ plugins })
installed.txt ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py @ file:///home/conda/feedstock_root/build_artifacts/absl-py_1705494584803/work
2
+ accelerate==1.3.0
3
+ aiofiles @ file:///home/conda/feedstock_root/build_artifacts/aiofiles_1698945915105/work
4
+ aiohttp @ file:///Users/runner/miniforge3/conda-bld/aiohttp_1710511610826/work
5
+ aiosignal @ file:///home/conda/feedstock_root/build_artifacts/aiosignal_1667935791922/work
6
+ altair @ file:///home/conda/feedstock_root/build_artifacts/altair-split_1711824856061/work
7
+ annotated-types @ file:///home/conda/feedstock_root/build_artifacts/annotated-types_1696634205638/work
8
+ anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1688651106312/work/dist
9
+ astor==0.8.1
10
+ astunparse @ file:///home/conda/feedstock_root/build_artifacts/astunparse_1610696312422/work
11
+ async-timeout @ file:///home/conda/feedstock_root/build_artifacts/async-timeout_1691763562544/work
12
+ attrs @ file:///private/var/folders/k1/30mswbxs7r1g6zwn8y4fyt500000gp/T/abs_224434dqzl/croot/attrs_1695717839274/work
13
+ blinker @ file:///home/conda/feedstock_root/build_artifacts/blinker_1698890160476/work
14
+ blis @ file:///Users/runner/miniforge3/conda-bld/cython-blis_1696148899313/work
15
+ boto3==1.34.87
16
+ botocore==1.34.87
17
+ branca @ file:///home/conda/feedstock_root/build_artifacts/branca_1714071803448/work
18
+ Brotli @ file:///Users/runner/miniforge3/conda-bld/brotli-split_1695989934239/work
19
+ cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work
20
+ cachetools @ file:///home/conda/feedstock_root/build_artifacts/cachetools_1708987703938/work
21
+ catalogue @ file:///Users/runner/miniforge3/conda-bld/catalogue_1695626443928/work
22
+ certifi==2023.7.22
23
+ cffi @ file:///private/var/folders/nz/j6p8yfhx1mv_0grj5xl4650h0000gp/T/abs_ab19r4bji3/croot/cffi_1670423206034/work
24
+ charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1698833585322/work
25
+ chinese-converter==1.1.1
26
+ click @ file:///home/conda/feedstock_root/build_artifacts/click_1692311806742/work
27
+ cloudpathlib @ file:///home/conda/feedstock_root/build_artifacts/cloudpathlib-meta_1697837790453/work
28
+ cloudpickle==3.0.0
29
+ colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work
30
+ colorgram.py==1.2.0
31
+ compress-fasttext==0.1.5
32
+ confection @ file:///Users/runner/miniforge3/conda-bld/confection_1701179345946/work
33
+ conll-df==0.0.4
34
+ contourpy==1.1.0
35
+ cryptography @ file:///Users/runner/miniforge3/conda-bld/cryptography-split_1708780317904/work
36
+ ctranslate2==3.24.0
37
+ curated-tokenizers==0.0.8
38
+ curated-transformers==0.1.1
39
+ cycler==0.11.0
40
+ cymem @ file:///Users/runner/miniforge3/conda-bld/cymem_1695443580499/work
41
+ Cython==3.0.9
42
+ dataclasses-json==0.6.4
43
+ datasets @ file:///home/conda/feedstock_root/build_artifacts/datasets_1709395865330/work
44
+ de-core-news-sm @ https://github.com/explosion/spacy-models/releases/download/de_core_news_sm-3.7.0/de_core_news_sm-3.7.0-py3-none-any.whl#sha256=d88c737eb7eb766f730f6a2dcb99dfcdb81623e1e0d89a9c638a2182ac19c52e
45
+ Deprecated==1.2.14
46
+ diffusers @ file:///home/conda/feedstock_root/build_artifacts/diffusers_1736990313447/work
47
+ dill @ file:///home/conda/feedstock_root/build_artifacts/dill_1706434688412/work
48
+ dimsim==0.2.2
49
+ dirtyjson==1.0.8
50
+ distro==1.9.0
51
+ en-core-web-lg @ https://github.com/explosion/spacy-models/releases/download/en_core_web_lg-3.7.1/en_core_web_lg-3.7.1-py3-none-any.whl#sha256=ab70aeb6172cde82508f7739f35ebc9918a3d07debeed637403c8f794ba3d3dc
52
+ en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl#sha256=86cc141f63942d4b2c5fcee06630fd6f904788d2f0ab005cce45aadb8fb73889
53
+ es-dep-news-trf @ https://github.com/explosion/spacy-models/releases/download/es_dep_news_trf-3.7.2/es_dep_news_trf-3.7.2-py3-none-any.whl#sha256=03117d52d9077a5af65a8163b419fb94569e2048039bdbacd6484b27bfcbd272
54
+ exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1733208806608/work
55
+ fastapi @ file:///home/conda/feedstock_root/build_artifacts/fastapi_1695951101499/work
56
+ ffmpy @ file:///home/conda/feedstock_root/build_artifacts/ffmpy_1659474992694/work
57
+ filelock==3.12.3
58
+ flatbuffers @ file:///home/conda/feedstock_root/build_artifacts/python-flatbuffers_1711466727397/work
59
+ folium @ file:///home/conda/feedstock_root/build_artifacts/folium_1718605969954/work
60
+ fonttools==4.42.1
61
+ frozenlist==1.4.0
62
+ fsspec @ file:///home/conda/feedstock_root/build_artifacts/fsspec_1707102468451/work
63
+ gast @ file:///home/conda/feedstock_root/build_artifacts/gast_1688368721366/work
64
+ GDAL @ file:///Users/runner/miniforge3/conda-bld/gdal-split_1700094783563/work/build/swig/python
65
+ gensim==4.3.2
66
+ geopandas @ file:///home/conda/feedstock_root/build_artifacts/geopandas_1726897840759/work
67
+ gmpy2 @ file:///Users/runner/miniforge3/conda-bld/gmpy2_1666808753481/work
68
+ google-auth @ file:///home/conda/feedstock_root/build_artifacts/google-auth_1711011113292/work
69
+ google-auth-oauthlib @ file:///home/conda/feedstock_root/build_artifacts/google-auth-oauthlib_1702414855226/work
70
+ google-pasta==0.2.0
71
+ gradio @ file:///home/conda/feedstock_root/build_artifacts/gradio_1722534833126/work
72
+ gradio_client @ file:///home/conda/feedstock_root/build_artifacts/gradio-client_1722483282795/work
73
+ greenlet==3.0.3
74
+ grpcio @ file:///Users/runner/miniforge3/conda-bld/grpc-split_1707750240521/work
75
+ h11 @ file:///home/conda/feedstock_root/build_artifacts/h11_1733327467879/work
76
+ h2 @ file:///home/conda/feedstock_root/build_artifacts/h2_1733298745555/work
77
+ h5py @ file:///Users/runner/miniforge3/conda-bld/h5py_1712765738236/work
78
+ hanziconv==0.3.2
79
+ hfst==3.16.0.1
80
+ hpack @ file:///home/conda/feedstock_root/build_artifacts/hpack_1733299205993/work
81
+ httpcore==1.0.4
82
+ httpx==0.27.0
83
+ huggingface_hub @ file:///home/conda/feedstock_root/build_artifacts/huggingface_hub_1711986612800/work
84
+ hyperframe @ file:///home/conda/feedstock_root/build_artifacts/hyperframe_1733298771451/work
85
+ idna @ file:///private/var/folders/nz/j6p8yfhx1mv_0grj5xl4650h0000gp/T/abs_771olrhiqw/croot/idna_1666125579282/work
86
+ importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1688754491823/work
87
+ importlib_resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1736252299705/work
88
+ jieba==0.42.1
89
+ Jinja2 @ file:///private/var/folders/nz/j6p8yfhx1mv_0grj5xl4650h0000gp/T/abs_9fjgzv9ant/croot/jinja2_1666908141308/work
90
+ jmespath==1.0.1
91
+ joblib==1.4.0
92
+ jsonpatch==1.33
93
+ jsonpointer==2.4
94
+ jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1733472696581/work
95
+ jsonschema-specifications @ file:///tmp/tmpk0f344m9/src
96
+ keras @ file:///home/conda/feedstock_root/build_artifacts/keras_1700038168849/work/keras-2.15.0-py3-none-any.whl#sha256=2dcc6d2e30cf9c951064b63c1f4c404b966c59caf09e01f3549138ec8ee0dd1f
97
+ kiwisolver @ file:///Users/runner/miniforge3/conda-bld/kiwisolver_1695379982481/work
98
+ LAC==2.1.2
99
+ langchain==0.1.16
100
+ langchain-community==0.0.33
101
+ langchain-core==0.1.44
102
+ langchain-text-splitters==0.0.1
103
+ langcodes @ file:///home/conda/feedstock_root/build_artifacts/langcodes_1636741340529/work
104
+ langsmith==0.1.48
105
+ lightgbm @ file:///home/conda/feedstock_root/build_artifacts/liblightgbm_1728547591057/work
106
+ llama-index-agent-openai==0.2.2
107
+ llama-index-cli==0.1.12
108
+ llama-index-core==0.10.30
109
+ llama-index-embeddings-openai==0.1.7
110
+ llama-index-indices-managed-llama-cloud==0.1.5
111
+ llama-index-legacy==0.9.48
112
+ llama-index-llms-openai==0.1.15
113
+ llama-index-multi-modal-llms-openai==0.1.5
114
+ llama-index-program-openai==0.1.5
115
+ llama-index-question-gen-openai==0.1.3
116
+ llama-index-readers-file==0.1.19
117
+ llama-index-readers-llama-parse==0.1.4
118
+ llama-parse==0.4.1
119
+ llamaindex-py-client==0.1.18
120
+ lxml @ file:///Users/runner/miniforge3/conda-bld/lxml_1715598488246/work
121
+ mapclassify @ file:///home/conda/feedstock_root/build_artifacts/mapclassify_1723589646909/work
122
+ Markdown==3.5.1
123
+ markdown-it-py @ file:///home/conda/feedstock_root/build_artifacts/markdown-it-py_1686175045316/work
124
+ MarkupSafe @ file:///private/var/folders/nz/j6p8yfhx1mv_0grj5xl4650h0000gp/T/abs_12c133f5-0720-4727-9c18-599a3af825723lzwham3/croots/recipe/markupsafe_1654597866058/work
125
+ marshmallow==3.21.1
126
+ matplotlib @ file:///Users/runner/miniforge3/conda-bld/matplotlib-suite_1712606004214/work
127
+ mdurl @ file:///home/conda/feedstock_root/build_artifacts/mdurl_1639515908913/work
128
+ mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1698947099619/work
129
+ ml-dtypes @ file:///Users/runner/miniforge3/conda-bld/ml_dtypes_1695281015782/work
130
+ mpmath @ file:///home/conda/feedstock_root/build_artifacts/mpmath_1678228039184/work
131
+ multidict==6.0.4
132
+ multiprocess @ file:///Users/runner/miniforge3/conda-bld/multiprocess_1706514928763/work
133
+ munkres==1.1.4
134
+ murmurhash @ file:///Users/runner/miniforge3/conda-bld/murmurhash_1695449903748/work
135
+ mypy-extensions==1.0.0
136
+ networkx==3.1
137
+ nltk==3.8.1
138
+ numpy @ file:///Users/runner/miniforge3/conda-bld/numpy_1707225416355/work/dist/numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl#sha256=14c628943f67e9c10d22faa9c1cdcbf69c3192e88f2f597d03b77a252e589567
139
+ oauthlib @ file:///home/conda/feedstock_root/build_artifacts/oauthlib_1666056362788/work
140
+ openai==1.22.0
141
+ opt-einsum @ file:///home/conda/feedstock_root/build_artifacts/opt_einsum_1696448916724/work
142
+ orjson==3.10.1
143
+ packaging==23.2
144
+ paddle-bfloat==0.1.7
145
+ paddlepaddle==2.4.0
146
+ pandas==2.1.0
147
+ pathy @ file:///private/var/folders/nz/j6p8yfhx1mv_0grj5xl4650h0000gp/T/abs_24y4elqnou/croot/pathy_1674585921280/work
148
+ Pillow==10.0.0
149
+ pinyin-jyutping-sentence==1.3
150
+ pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1733344503739/work
151
+ pkuseg==0.0.25
152
+ plotly==5.24.1
153
+ portalocker==2.10.1
154
+ preshed @ file:///Users/runner/miniforge3/conda-bld/preshed_1695645021215/work
155
+ protobuf==3.20.0
156
+ psutil==5.9.8
157
+ pyarrow==15.0.0
158
+ pyarrow-hotfix @ file:///home/conda/feedstock_root/build_artifacts/pyarrow-hotfix_1700596371886/work
159
+ pyasn1 @ file:///home/conda/feedstock_root/build_artifacts/pyasn1_1713209357222/work
160
+ pyasn1_modules @ file:///home/conda/feedstock_root/build_artifacts/pyasn1-modules_1713209683338/work
161
+ pyconll==3.2.0
162
+ pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
163
+ pydantic @ file:///home/conda/feedstock_root/build_artifacts/pydantic_1700669197061/work
164
+ pydantic_core @ file:///Users/runner/miniforge3/conda-bld/pydantic-core_1700664087163/work
165
+ pydub @ file:///home/conda/feedstock_root/build_artifacts/pydub_1734638032476/work
166
+ Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1691408637400/work
167
+ PyJWT @ file:///home/conda/feedstock_root/build_artifacts/pyjwt_1706895065046/work
168
+ pyogrio @ file:///Users/runner/miniforge3/conda-bld/pyogrio_1700083253656/work
169
+ pyonmttok==1.37.1
170
+ pyOpenSSL @ file:///home/conda/feedstock_root/build_artifacts/pyopenssl_1706660063483/work
171
+ pyparsing==3.1.1
172
+ pypdf==4.2.0
173
+ pypinyin==0.52.0
174
+ pyproj @ file:///Users/runner/miniforge3/conda-bld/pyproj_1699268117362/work
175
+ pyrsistent @ file:///Users/ktietz/ci_310/pyrsistent_1643962172005/work
176
+ PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1661604839144/work
177
+ python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work
178
+ python-dotenv==1.0.1
179
+ python-multipart @ file:///home/conda/feedstock_root/build_artifacts/python-multipart_1734420773152/work
180
+ pytz==2023.3.post1
181
+ pyu2f @ file:///home/conda/feedstock_root/build_artifacts/pyu2f_1604248910016/work
182
+ PyYAML @ file:///Users/runner/miniforge3/conda-bld/pyyaml_1695373498369/work
183
+ pyzmq==26.0.0
184
+ referencing @ file:///home/conda/feedstock_root/build_artifacts/referencing_1737120398748/work
185
+ regex==2023.10.3
186
+ requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1684774241324/work
187
+ requests-oauthlib @ file:///home/conda/feedstock_root/build_artifacts/requests-oauthlib_1711290127547/work
188
+ rich @ file:///home/conda/feedstock_root/build_artifacts/rich-split_1700160075651/work/dist
189
+ rouge==1.0.1
190
+ rpds-py @ file:///Users/runner/miniforge3/conda-bld/rpds-py_1733366666571/work
191
+ rsa @ file:///home/conda/feedstock_root/build_artifacts/rsa_1658328885051/work
192
+ ruff @ file:///Users/runner/miniforge3/conda-bld/ruff_1737378870230/work
193
+ s3transfer==0.10.1
194
+ sacrebleu==2.4.2
195
+ sacremoses==0.1.1
196
+ safetensors @ file:///Users/runner/miniforge3/conda-bld/safetensors_1713253511904/work
197
+ scikit-learn==1.4.2
198
+ SciPy @ file:///Users/runner/miniforge3/conda-bld/scipy-split_1700812700233/work/dist/scipy-1.11.4-cp310-cp310-macosx_11_0_arm64.whl#sha256=375d32c2e30658f658c57cabef9cbbe6df2df8a14f5cb858d49fc66e910be7a5
199
+ seaborn==0.12.2
200
+ semantic-version @ file:///home/conda/feedstock_root/build_artifacts/semantic_version_1653579368137/work
201
+ sentencepiece==0.2.0
202
+ shapely @ file:///Users/runner/miniforge3/conda-bld/shapely_1697191406032/work
203
+ shellingham @ file:///home/conda/feedstock_root/build_artifacts/shellingham_1698144360966/work
204
+ six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
205
+ smart-open @ file:///home/conda/feedstock_root/build_artifacts/smart_open_split_1694066705667/work/dist
206
+ sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1733244044561/work
207
+ spaces==0.32.0
208
+ spacy @ file:///Users/runner/miniforge3/conda-bld/spacy_1699195026989/work
209
+ spacy-conll==4.0.1
210
+ spacy-curated-transformers==0.2.1
211
+ spacy-legacy @ file:///home/conda/feedstock_root/build_artifacts/spacy-legacy_1674550301837/work
212
+ spacy-loggers @ file:///home/conda/feedstock_root/build_artifacts/spacy-loggers_1694527114282/work
213
+ spacy-pkuseg==0.0.33
214
+ SQLAlchemy==2.0.29
215
+ srsly @ file:///Users/runner/miniforge3/conda-bld/srsly_1695654012378/work
216
+ starlette @ file:///home/conda/feedstock_root/build_artifacts/starlette-recipe_1684245096404/work
217
+ striprtf==0.0.26
218
+ sympy==1.13.1
219
+ tabulate==0.9.0
220
+ tenacity==8.2.3
221
+ tensorboard @ file:///home/conda/feedstock_root/build_artifacts/tensorboard_1707486779203/work/tensorboard-2.15.2-py3-none-any.whl#sha256=a6f6443728064d962caea6d34653e220e34ef8df764cb06a8212c17e1a8f0622
222
+ tensorboard-data-server @ file:///Users/runner/miniforge3/conda-bld/tensorboard-data-server_1695425507462/work/tensorboard_data_server-0.7.0-py3-none-any.whl#sha256=d64d6779d77d7ddd0e3ca6179f5719a9609921e96c89c5b6e126fbe37cb6a9cb
223
+ tensordict==0.3.2
224
+ tensorflow @ file:///Users/uwe/Development/conda-forge/tensorflow-feedstock/tensorflow-feedstock-osx_arm64_numpy1.22python3.10.____cpython/miniforge3/conda-bld/tensorflow-split_1705684890772/work/tensorflow_pkg/tensorflow-2.15.0-cp310-cp310-macosx_11_0_arm64.whl#sha256=29950c12963854214b40eb023bea9ad684abeb97c26d33c4184b9c4536f9c4a5
225
+ tensorflow-addons==0.21.0
226
+ tensorflow_estimator @ file:///Users/uwe/Development/conda-forge/tensorflow-feedstock/tensorflow-feedstock-osx_arm64_numpy1.22python3.10.____cpython/miniforge3/conda-bld/tensorflow-split_1705684890772/work/tensorflow-estimator/wheel_dir/tensorflow_estimator-2.15.0-py2.py3-none-any.whl#sha256=d475182953e6133aade7c6c9271d7beffe86758164ca1d39d9758018c7c82747
227
+ termcolor @ file:///home/conda/feedstock_root/build_artifacts/termcolor_1704357939450/work
228
+ thinc @ file:///Users/runner/miniforge3/conda-bld/thinc_1695623306101/work
229
+ threadpoolctl==3.4.0
230
+ thulac==0.2.2
231
+ tiktoken==0.6.0
232
+ tokenizers==0.19.1
233
+ tomlkit @ file:///home/conda/feedstock_root/build_artifacts/tomlkit_1690458286251/work
234
+ toolz @ file:///home/conda/feedstock_root/build_artifacts/toolz_1733736030883/work
235
+ torch==2.6.0
236
+ torchaudio==2.6.0
237
+ torcheval==0.0.7
238
+ torchvision==0.21.0
239
+ tornado @ file:///private/var/folders/k1/30mswbxs7r1g6zwn8y4fyt500000gp/T/abs_28d93aezp2/croot/tornado_1690848278715/work
240
+ tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1691671248568/work
241
+ transformers==4.40.0
242
+ typeguard==2.13.3
243
+ typer==0.15.1
244
+ typing-inspect==0.9.0
245
+ typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1712329955671/work
246
+ tzdata==2023.3
247
+ unicodedata2 @ file:///Users/runner/miniforge3/conda-bld/unicodedata2_1695848003431/work
248
+ urllib3==2.0.4
249
+ uvicorn @ file:///home/conda/feedstock_root/build_artifacts/uvicorn_1734292939144/work
250
+ wasabi @ file:///Users/runner/miniforge3/conda-bld/wasabi_1686131568298/work
251
+ weasel @ file:///home/conda/feedstock_root/build_artifacts/weasel_1699295455892/work
252
+ websockets @ file:///Users/runner/miniforge3/conda-bld/websockets_1695410052706/work
253
+ Werkzeug @ file:///home/conda/feedstock_root/build_artifacts/werkzeug_1712099422348/work
254
+ wikipedia==1.4.0
255
+ wrapt @ file:///Users/runner/miniforge3/conda-bld/wrapt_1666806247108/work
256
+ xxhash @ file:///Users/runner/miniforge3/conda-bld/python-xxhash_1696486346782/work
257
+ xyzservices @ file:///home/conda/feedstock_root/build_artifacts/xyzservices_1725366347586/work
258
+ yarl==1.9.2
259
+ zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1689374466814/work
readme.md ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Recipe RAG System
2
+
3
+ **Author:** [Your Name]
4
+ **Date:** February 2025
5
+
6
+ ## Project Overview
7
+
8
+ A production-ready Retrieval-Augmented Generation (RAG) system specialized for recipe queries. This system combines vector search with language model generation to provide contextually relevant recipe information based on user queries.
9
+
10
+ ### Key Features
11
+
12
+ - **Efficient Information Retrieval**: Semantic search using BGE embeddings to find the most relevant recipes
13
+ - **Contextual Response Generation**: LLM-powered responses that incorporate retrieved recipe information
14
+ - **Scalable API Architecture**: FastAPI-based RESTful API with proper error handling and documentation
15
+ - **Fully Containerized**: Docker and Docker Compose setup for easy deployment
16
+ - **Comprehensive Testing**: Unit and integration tests covering core functionalities
17
+ - **Monitoring Setup**: Prometheus and Grafana integration (optional)
18
+
19
+ ## Architecture
20
+
21
+ The system consists of two main components:
22
+
23
+ 1. **Recipe RAG Core**: Handles document indexing, retrieval, and response generation
24
+ 2. **API Server**: Exposes the RAG functionality through REST endpoints
25
+
26
+ ## Technology Stack
27
+
28
+ - **Vector Search**: LlamaIndex with HuggingFace embeddings (BGE model)
29
+ - **Language Model**: Microsoft Phi-3-mini-4k-instruct
30
+ - **API Framework**: FastAPI
31
+ - **Dataset**: HuggingFace Recipe NLG Lite
32
+ - **Containerization**: Docker & Docker Compose
33
+ - **Testing**: pytest & unittest
34
+ - **Monitoring**: Prometheus & Grafana (optional)
35
+
36
+ ## Skills Demonstrated
37
+
38
+ - **Natural Language Processing**: Implementation of a RAG system using modern techniques
39
+ - **API Development**: RESTful API design with proper validation and error handling
40
+ - **Software Engineering**: Clean code architecture with appropriate abstractions
41
+ - **MLOps Practices**: Containerization, testing, and monitoring setup
42
+ - **Python Development**: Type hints, logging, configuration management
43
+
44
+ ## Deployment
45
+
46
+ The system can be deployed with a single command:
47
+
48
+ ```bash
49
+ docker-compose up -d
50
+ ```
51
+
52
+ This starts the API server and, if enabled, the Prometheus/Grafana monitoring stack.
53
+
54
+ ## Future Improvements
55
+
56
+ - Implement caching layer for frequently asked questions
57
+ - Add a vector database for improved scalability
58
+ - Implement a feedback loop for continuous improvement
59
+ - Develop a user-friendly front-end interface