Muhammad Musa Zulfiqar committed on
Commit
38c37d9
·
1 Parent(s): 2cf8a90

project added

Browse files
Files changed (3) hide show
  1. Dockerfile +15 -0
  2. app.py +86 -0
  3. requirements.txt +4 -0
Dockerfile ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use the official Python image
FROM python:3.9

# Set the working directory inside the container
WORKDIR /app

# Copy and install dependencies first so code-only changes reuse this layer
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the app code
COPY . .

# Document the port the API listens on (the original comment promised this
# but the EXPOSE instruction was missing)
EXPOSE 7860

# Serve the FastAPI app on port 7860
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ from fastapi import FastAPI, HTTPException
4
+ from fastapi.middleware.cors import CORSMiddleware
5
+ from pydantic import BaseModel
6
+ from transformers import AutoModelForCausalLM, AutoTokenizer
7
+ import torch
8
+
9
# Initialize FastAPI app
app = FastAPI()

# Enable CORS (Allow all origins)
# NOTE(review): browsers reject Access-Control-Allow-Origin "*" combined with
# allow_credentials=True for credentialed requests — confirm credentials are
# actually needed, otherwise drop allow_credentials.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allow all origins
    allow_credentials=True,
    allow_methods=["*"],  # Allow all methods
    allow_headers=["*"],  # Allow all headers
)

# Load the model at import time so the first request does not pay the cost.
# NOTE(review): the tokenizer is loaded from the base repo "auhide/chef-gpt-en"
# while the weights come from MODEL_ID — presumably a fine-tune that reuses the
# base tokenizer; confirm the vocabularies match.
MODEL_ID = "mzman123/musa-chef-gpt"
tokenizer = AutoTokenizer.from_pretrained("auhide/chef-gpt-en")
chef_gpt = AutoModelForCausalLM.from_pretrained(MODEL_ID)
print("Model Loaded")
26
+
27
+
28
# Define request body structure
class IngredientsRequest(BaseModel):
    """Request body for POST /generate."""

    # Comma-separated ingredient list, e.g. "chicken, rice, garlic"
    # (the /generate handler splits this string on commas).
    ingredients: str
31
+
32
+
33
@app.get("/")
def home():
    """Root endpoint; returns a static greeting (doubles as a liveness check)."""
    greeting = {"message": "Hello World"}
    return greeting
36
+
37
+
38
@app.post("/generate")
def generate_from_model(request: IngredientsRequest):
    """Generate a recipe from the given ingredients.

    Expects ``request.ingredients`` as a comma-separated string
    (e.g. ``"chicken, rice"``). Returns ``{"recipe": [<line>, ...]}``,
    the generated recipe split on newlines.
    Raises HTTP 400 when no usable ingredient is supplied.
    """
    # Split on commas and drop empty/whitespace-only entries. The original
    # guard `isinstance(ingredients, list)` was always true (str.split always
    # returns a list), so its 400 branch was dead code and "" reached the model.
    ingredients = [part.strip() for part in request.ingredients.split(",") if part.strip()]
    print("at backend:", ingredients)

    if not ingredients:
        raise HTTPException(status_code=400, detail="Input data should be a comma-separated string of ingredients")

    # Prompt format the chef-gpt model was trained on: "ingredients>> ... ; recipe>>"
    prompt_text = f"ingredients>> {', '.join(ingredients)} ; recipe>>"
    prompt_tokens = tokenizer(prompt_text, return_tensors="pt")

    print("Prompt =", prompt_text)

    # Inference only — no_grad avoids building autograd state.
    with torch.no_grad():
        output_test = chef_gpt.generate(
            prompt_tokens.input_ids,
            do_sample=True,
            max_length=1000,
            top_p=0.95,
            attention_mask=prompt_tokens.attention_mask,
        )

    recipe = tokenizer.batch_decode(output_test)[0]
    print("Recipe before regex =", recipe)

    # Keep only the text after the "recipe>>" marker. The original pattern
    # r"recipe>>([\s\S]*?)" had a lazy group that always matched the empty
    # string, so only match.end() mattered — a literal search is equivalent.
    match = re.search(r"recipe>>", recipe)
    if match:
        recipe = recipe[match.end():].strip()
    else:
        print("Recipe section not found.")

    # Strip special tokens left in by batch_decode. (The original list also
    # contained '' — replacing '' with '' is a no-op, so it was dropped.)
    for phrase in ('<|endoftext|>',):
        recipe = recipe.replace(phrase, '')

    return {"recipe": recipe.split("\n")}
81
+
82
+
83
# Run the FastAPI app (only when executing locally)
if __name__ == "__main__":
    import uvicorn

    # PORT env var overrides the default 7860 (the same port the Dockerfile
    # CMD binds when running in a container).
    uvicorn.run(app, host="0.0.0.0", port=int(os.getenv("PORT", 7860)))
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
fastapi
pydantic
torch
transformers
uvicorn