# NPChef / backend / temp.py
# (Hugging Face page artifact preserved as a comment: author mehulr910,
#  "initial commit", revision 6f9dac7 — the raw lines were not valid Python.)
# import os
# from fastapi import FastAPI, Request, HTTPException
# from fastapi.responses import HTMLResponse, JSONResponse
# from fastapi.staticfiles import StaticFiles
# from fastapi.templating import Jinja2Templates
# app = FastAPI()
# # Configure paths
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# STATIC_DIR = os.path.join(BASE_DIR, "frontend", "static")
# TEMPLATE_DIR = os.path.join(BASE_DIR, "frontend", "templates")
# # Debug paths
# print(f"Static files directory: {STATIC_DIR} (exists: {os.path.exists(STATIC_DIR)})")
# print(f"Templates directory: {TEMPLATE_DIR} (exists: {os.path.exists(TEMPLATE_DIR)})")
# app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
# templates = Jinja2Templates(directory=TEMPLATE_DIR)
# @app.get("/", response_class=HTMLResponse)
# async def home(request: Request):
# return templates.TemplateResponse("index.html", {"request": request})
# # Add API endpoint for recipe generation
# @app.post("/api/generate-recipe")
# async def generate_recipe(request: Request):
# try:
# data = await request.json()
# ingredients = data.get("ingredients", "")
# diet = data.get("diet")
# cuisine = data.get("cuisine")
# # Here you would add your actual recipe generation logic
# # For now, returning a mock response
# return JSONResponse({
# "recipe": f"Mock recipe using: {ingredients}\n\n1. Do something\n2. Then another step\n3. Serve hot!",
# "image_url": "https://via.placeholder.com/600x400?text=Recipe+Image"
# })
# except Exception as e:
# raise HTTPException(status_code=400, detail=str(e))
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
# ---- Local LLM setup -------------------------------------------------------
# Mistral-7B-Instruct is loaded through a Hugging Face text-generation
# pipeline, then wrapped so LangChain can drive it as a chat model.
MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.3"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",   # spread layers across whatever devices are available
    torch_dtype="auto",  # use the checkpoint's native precision
)

# Sampling generation: at most 200 new tokens per reply, temperature 0.7.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=200,
    do_sample=True,
    temperature=0.7,
)

llm = HuggingFacePipeline(pipeline=pipe)
modell = ChatHuggingFace(llm=llm)  # chat-style interface over the raw pipeline
# Prompt template: system instruction + rolling chat history + newest user turn
# Chat prompt: a fixed system instruction, followed by the running
# conversation (injected via the "history" placeholder), then the latest
# user message filled in as {input}.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a wise assistant reply quickly to users prompt."),
        MessagesPlaceholder("history"),
        ("user", "{input}"),
    ]
)
# ---- Interactive chat loop -------------------------------------------------
# Keeps a rolling transcript so every turn is answered with full context.
# Type "stop" or "exit" (any casing, surrounding whitespace ignored) to quit;
# Ctrl-D / end-of-input also terminates cleanly instead of crashing.
history = []
while True:
    try:
        user_input = input("You: ")
    except EOFError:
        # stdin closed (e.g. piped input exhausted) — exit gracefully.
        break
    command = user_input.strip()
    if not command:
        continue  # skip blank lines rather than querying the model
    if command.lower() in {"stop", "exit"}:
        break
    chain_input = {
        "input": user_input,
        "history": history,
    }
    # Render the prompt (system + history + new turn), then generate a reply.
    response = modell.invoke(prompt.invoke(chain_input))
    print("AI:", response.content)
    # Record both sides of the exchange so the next prompt sees them.
    history.append(("user", user_input))
    history.append(("assistant", response.content))