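# Earlier FastAPI scaffold for the recipe-generation app, kept commented out for reference.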
# import os
# from fastapi import FastAPI, Request, HTTPException
# from fastapi.responses import HTMLResponse, JSONResponse
# from fastapi.staticfiles import StaticFiles
# from fastapi.templating import Jinja2Templates

# app = FastAPI()

# # Configure paths
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# STATIC_DIR = os.path.join(BASE_DIR, "frontend", "static")
# TEMPLATE_DIR = os.path.join(BASE_DIR, "frontend", "templates")

# # Debug paths
# print(f"Static files directory: {STATIC_DIR} (exists: {os.path.exists(STATIC_DIR)})")
# print(f"Templates directory: {TEMPLATE_DIR} (exists: {os.path.exists(TEMPLATE_DIR)})")

# app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
# templates = Jinja2Templates(directory=TEMPLATE_DIR)

# @app.get("/", response_class=HTMLResponse)
# async def home(request: Request):
#     return templates.TemplateResponse("index.html", {"request": request})

# # Add API endpoint for recipe generation
# @app.post("/api/generate-recipe")
# async def generate_recipe(request: Request):
#     try:
#         data = await request.json()
#         ingredients = data.get("ingredients", "")
#         diet = data.get("diet")
#         cuisine = data.get("cuisine")
        
#         # Here you would add your actual recipe generation logic
#         # For now, returning a mock response
#         return JSONResponse({
#             "recipe": f"Mock recipe using: {ingredients}\n\n1. Do something\n2. Then another step\n3. Serve hot!",
#             "image_url": "https://via.placeholder.com/600x400?text=Recipe+Image"
#         })
#     except Exception as e:
#         raise HTTPException(status_code=400, detail=str(e))


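# Interactive chat demo: Mistral-7B-Instruct served through transformers + LangChain.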
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

model_id = "mistralai/Mistral-7B-Instruct-v0.3"
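# NOTE: this checkpoint is gated on the Hugging Face Hub; you may need to accept
# its license and authenticate (e.g. `huggingface-cli login`) before it will download.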

# Load tokenizer + model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype="auto"
)
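# device_map="auto" requires the `accelerate` package and spreads the weights across
# available devices; torch_dtype="auto" uses the dtype stored in the checkpoint.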

# Create text-generation pipeline
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=200,
    do_sample=True,
    temperature=0.7
)
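# do_sample=True with temperature=0.7 gives mildly varied replies; max_new_tokens
# caps each response at 200 generated tokens.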

# Wrap the raw pipeline in a LangChain chat model so the model's chat template is applied
llm = HuggingFacePipeline(pipeline=pipe)
chat_model = ChatHuggingFace(llm=llm)

# Prompt Template
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a wise assistant reply quickly to users prompt."),
    MessagesPlaceholder("history"),
    ("user", "{input}")
])

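# Conversation history as (role, content) tuples, fed back in via MessagesPlaceholder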
history = []

while True:
    user_input = input("You: ")
    if user_input.strip().lower() in ("stop", "exit"):
        break

    chain_input = {
        "input": user_input,
        "history": history
    }

    # Generate answer
    response = chat_model.invoke(prompt.invoke(chain_input))

    print("AI:", response.content)

    # Maintain chat memory
    history.append(("user", user_input))
    history.append(("assistant", response.content))