anoopreddyyeddula committed on
Commit
5347c3a
·
verified ·
1 Parent(s): 86751c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -19
app.py CHANGED
@@ -2,20 +2,14 @@ import gradio as gr
2
  import pandas as pd
3
  import joblib
4
  from transformers import pipeline
5
- import torch
6
 
7
- # Load models once at startup
8
  product_models = joblib.load('models/inventory_forecaster.pkl')
 
9
 
10
- # Use a smaller, faster model and specify device
11
- device = 0 if torch.cuda.is_available() else -1
12
- llm = pipeline("text2text-generation",
13
- model="google/flan-t5-small", # smaller model
14
- device=device) # use GPU if available
15
-
16
- # Cache the function to avoid recomputing for same inputs
17
- @gr.cache_examples
18
  def inventory_advisor(product_id, current_inventory, last_day_sales):
 
19
  if product_id not in product_models:
20
  return f"❌ Error: Product ID {product_id} not found in models."
21
 
@@ -24,13 +18,11 @@ def inventory_advisor(product_id, current_inventory, last_day_sales):
24
 
25
  prompt = (f"Current inventory is {current_inventory} units. "
26
  f"Predicted sales for next week is {int(future_sales)} units. "
27
- f"Should restocking be done? Give a brief restocking advice in 2 sentences.")
 
 
28
 
29
- response = llm(prompt,
30
- max_length=50, # reduced max length
31
- num_beams=2) # faster beam search
32
-
33
- return f"🔮 Predicted Sales Next Week: {int(future_sales)} units\n\n🛒 Advice:\n{response[0]['generated_text']}"
34
 
35
  iface = gr.Interface(
36
  fn=inventory_advisor,
@@ -41,9 +33,7 @@ iface = gr.Interface(
41
  ],
42
  outputs="text",
43
  title="📦 Real-Time Inventory Management (Multi-Product)",
44
- description="Enter product ID, current stock, and yesterday's sales. Get AI-based restocking advice!",
45
- examples=[[1, 100, 50], [2, 200, 75]], # Add example inputs
46
- cache_examples=True
47
  )
48
 
49
  if __name__ == "__main__":
 
2
  import pandas as pd
3
  import joblib
4
  from transformers import pipeline
 
5
 
6
+ # Load all ML models
7
  product_models = joblib.load('models/inventory_forecaster.pkl')
8
+ llm = pipeline("text2text-generation", model="google/flan-t5-base")
9
 
10
+ # Function to predict and generate restocking advice
 
 
 
 
 
 
 
11
  def inventory_advisor(product_id, current_inventory, last_day_sales):
12
+ # Select correct model
13
  if product_id not in product_models:
14
  return f"❌ Error: Product ID {product_id} not found in models."
15
 
 
18
 
19
  prompt = (f"Current inventory is {current_inventory} units. "
20
  f"Predicted sales for next week is {int(future_sales)} units. "
21
+ f"Should restocking be done? Suggest a human-readable restocking advice.")
22
+
23
+ response = llm(prompt, max_length=100)[0]['generated_text']
24
 
25
+ return f"🔮 Predicted Sales Next Week: {int(future_sales)} units\n\n🛒 Advice:\n{response}"
 
 
 
 
26
 
27
  iface = gr.Interface(
28
  fn=inventory_advisor,
 
33
  ],
34
  outputs="text",
35
  title="📦 Real-Time Inventory Management (Multi-Product)",
36
+ description="Enter product ID, current stock, and yesterday's sales. Get AI-based restocking advice!"
 
 
37
  )
38
 
39
  if __name__ == "__main__":