Justin-J committed on
Commit
c614398
·
1 Parent(s): 3893725

Added my Project Files

Browse files
Files changed (5) hide show
  1. app.py +205 -0
  2. beautiful image.png +0 -0
  3. encoder.pkl +3 -0
  4. requirements.txt +5 -0
  5. scaler.pkl +3 -0
app.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Loading key libraries
2
+ import streamlit as st
3
+ import os
4
+ import pickle
5
+ import numpy as np
6
+ import pandas as pd
7
+ import re
8
+ from pathlib import Path
9
+ from PIL import Image
10
+ from category_encoders.binary import BinaryEncoder
11
+ from sklearn.preprocessing import StandardScaler
12
+
13
# ---- Page configuration ----
# Wide layout, auto sidebar, dollar-sign icon to match the sales theme.
st.set_page_config(
    page_title="Sales Prediction Forecasting",
    page_icon=":heavy_dollar_sign:",
    layout="wide",
    initial_sidebar_state="auto",
)

# ---- Page title ----
st.title("Grocery Store Sales Time Series Model Prediction")
18
+
19
+ # Function to load the dataset
20
@st.cache_data
def load_data(relative_path):
    """Read the merged training CSV once and cache the resulting frame.

    Parameters
    ----------
    relative_path : str
        Path to the CSV file; its first column becomes the index.

    Returns
    -------
    pandas.DataFrame
        The loaded dataset.
    """
    # st.cache_data (not cache_resource) is the correct cache for
    # serializable data such as DataFrames: callers receive independent
    # copies, so later mutations cannot corrupt the cached object.
    # (Also removed dead commented-out code that referenced a nonexistent
    # `merged` variable.)
    return pd.read_csv(relative_path, index_col=0)
25
+
26
+
27
+
28
+
29
# ---- Base dataframe ----
rpath = r"merged_train_data.csv"
data = load_data(rpath)

# ---- Pre-fitted artifacts (model, encoder, scaler) ----
# Use context managers so each file handle is closed promptly instead of
# leaking until garbage collection (the original used pickle.load(open(...))).
# NOTE(review): unpickling executes arbitrary code — these .pkl files must
# remain trusted project artifacts.
with open("model.pkl", "rb") as f:
    model = pickle.load(f)
with open("encoder.pkl", "rb") as f:
    encoder = pickle.load(f)
with open("scaler.pkl", "rb") as f:
    scaler = pickle.load(f)

# ---- Main sections of the app ----
header = st.container()
dataset = st.container()
features_and_output = st.container()
44
+
45
+
46
+
47
+
48
# ---- Sidebar: glossary of the dataset columns ----
COLUMN_OVERVIEW = """
- **store_nbr** identifies the store at which the products are sold.
- **family** identifies the type of product sold.
- **sales** is the total sales for a product family at a particular store at a given date. Fractional values are possible since products can be sold in fractional units(1.5 kg of cheese, for instance, as opposed to 1 bag of chips).
- **onpromotion** gives the total number of items in a product family that were being promoted at a store at a given date.
- **date** is the date on which a transaction / sale was made
- **city** is the city in which the store is located
- **state** is the state in which the store is located
- **store_type** is the type of store, based on Corporation Favorita's own type system
- **cluster** is a grouping of similar stores.
- **oil_price** is the daily oil price
"""

st.sidebar.header("Brief overview of the Columns")
st.sidebar.markdown(COLUMN_OVERVIEW)
62
+
63
# ---- Dataset preview section ----
with dataset:
    # Inside the container context, bare st.* calls render into `dataset`,
    # so these are equivalent to the dataset.* method calls.
    preview_requested = st.checkbox("Preview the dataset")
    if preview_requested:
        st.write(data.head())
        st.write("Further information will preview when take a look at the sidebar")
    st.write("---")
69
+
70
+
71
+
72
+
73
# Icon / banner image for the page
image = Image.open(r"beautiful image.png")

# Form that captures the user's inputs; widgets reset after each submit
form = st.form(key="information", clear_on_submit=True)

# ---- Header section ----
with header:
    # Fixed the grammar of the visible intro text ("This an" -> "This is an").
    header.write("This is an application to build a model that more accurately predicts the unit sales for thousands of items sold at different Favorita stores")
    header.image(image)
    header.write("---")
86
+
87
+
88
+
89
+
90
# ---- Features-and-output section: caption plus a three-column layout ----
with features_and_output:
    features_and_output.subheader("Inputs")
    features_and_output.write("This section captures your input to be used in predictions")

    left_col, mid_col, right_col = features_and_output.columns(3)

# ---- Input widgets, rendered into the three columns inside the form ----
with form:
    # Column 1: product / transaction details
    left_col.markdown("***Combined data on Product and Transaction***")
    date = left_col.date_input("Select a date:")
    family = left_col.selectbox("Product family:", options=sorted(data["family"].unique()))
    promo_floor = data["onpromotion"].min()
    onpromotion = left_col.number_input("Number of products on promotion:", min_value=promo_floor, value=promo_floor)
    city = left_col.selectbox("City:", options=sorted(set(data["city"])))

    # Column 2: store location and type
    mid_col.markdown("***Data on Location and type***")
    store_nbr = mid_col.selectbox("Store number:", options=sorted(set(data["store_nbr"])))
    type_x = mid_col.radio("type_x:", options=sorted(set(data["type_x"])), horizontal=True)
    type_y = mid_col.radio("type_y:", options=sorted(set(data["type_y"])), horizontal=True)
    cluster = mid_col.select_slider("Store cluster:", options=sorted(set(data["cluster"])))
    state = mid_col.selectbox("State:", options=sorted(set(data["state"])))

    # Column 3: economic factors
    right_col.markdown("***Data on Economical Factors***")
    oil_floor = data["oil_price"].min()
    oil_price = right_col.number_input("Oil price:", min_value=oil_floor, value=oil_floor)
114
+
115
# ---- Submission point ----
submitted = form.form_submit_button(label="Submit button")

if submitted:
    with features_and_output:
        # Collect every widget value, each wrapped in a one-element list
        # (DataFrame-ready shape).
        # NOTE(review): `input_features` is never read again in this script —
        # the prediction below uses `input_df` instead; this looks like dead
        # code, confirm before removing.
        input_features = {
            name: [value]
            for name, value in (
                ("date", date),
                ("store_nbr", store_nbr),
                ("family", family),
                ("onpromotion", onpromotion),
                ("city", city),
                ("state", state),
                ("type_x", type_x),
                ("cluster", cluster),
                ("oil_price", oil_price),
                ("type_y", type_y),
            )
        }
132
+
133
+
134
+
135
# Define the function to make predictions
def predict_sales(input_data, input_df):
    """Scale/encode one row of raw user input and return the model's prediction.

    Parameters
    ----------
    input_data : ignored
        Retained only for backward compatibility with existing callers;
        the features are rebuilt from ``input_df``.
    input_df : pandas.DataFrame
        Single-row frame holding the raw feature columns.

    Returns
    -------
    numpy.ndarray
        One-element array containing the predicted sales value.
    """
    categoric_columns = ['family', 'city', 'state', 'type_y', 'type_x']
    numeric_columns = [c for c in input_df.columns if c not in categoric_columns]
    # BUG FIX: the original called scaler.fit_transform here, which REFITS the
    # scaler on the single input row (zeroing out the features) instead of
    # applying the training-time scaling; transform() uses the fitted params.
    # Wrapping the result in a DataFrame also fixes the original
    # pd.concat([ndarray, DataFrame]) TypeError.
    scaled_num = pd.DataFrame(
        scaler.transform(input_df[numeric_columns]), columns=numeric_columns
    )
    encoded_cat = encoder.transform(input_df[categoric_columns])
    # Reset indices so the single row stays aligned when concatenated.
    features = pd.concat(
        [scaled_num.reset_index(drop=True), encoded_cat.reset_index(drop=True)],
        axis=1,
    )
    # Model expects a 2-D array of shape (1, n_features).
    return model.predict(features.to_numpy().reshape(1, -1))
148
+
149
# Convert the widget values into a one-row pandas DataFrame.
# Key order is deliberate: predict_sales derives its numeric-column list from
# the frame's column order, so do not reorder these entries.
input_dict = dict(
    store_nbr=store_nbr,
    cluster=cluster,
    city=city,
    state=state,
    family=family,
    type_x=type_x,
    type_y=type_y,
    onpromotion=onpromotion,
    oil_price=oil_price,
    date=date,
)
input_df = pd.DataFrame([input_dict])
163
+
164
+
165
def getDateFeatures(df):
    """Derive calendar and seasonal features from the ``date`` column.

    Adds month/day/week/quarter parts, weekend and period-boundary flags,
    a 4-value season code, a pay-day flag (15th or month end), and a flag
    for the 2016-04-16..2016-12-31 earthquake-impact window.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a ``date`` column parseable by ``pd.to_datetime``
        (unparseable values become NaT via errors='coerce').

    Returns
    -------
    pandas.DataFrame
        A NEW frame: the input plus the derived columns.

    Notes
    -----
    The original wrapped this in ``@st.cache_resource`` and mutated its
    argument in place — caching a mutating function on an unhashable
    DataFrame argument is unsafe, so the decorator is dropped and the work
    is done on a copy (the sole caller rebinds the result, so behavior for
    callers is unchanged).
    """
    df = df.copy()
    df['date'] = pd.to_datetime(df['date'], errors='coerce')
    df['month'] = df['date'].dt.month
    df['day_of_month'] = df['date'].dt.day
    df['day_of_year'] = df['date'].dt.dayofyear
    # isocalendar().week is a UInt32 extension type; cast to float to match
    # the dtype the model was trained with.
    df['week_of_year'] = df['date'].dt.isocalendar().week
    df['week_of_year'] = df['week_of_year'].astype(float)
    df['day_of_week'] = df['date'].dt.dayofweek
    df['year'] = df['date'].dt.year
    # dayofweek > 4 means Saturday (5) or Sunday (6).
    df["is_weekend"] = np.where(df['day_of_week'] > 4, 1, 0)
    df['is_month_start'] = df['date'].dt.is_month_start.astype(int)
    df['quarter'] = df['date'].dt.quarter
    df['is_month_end'] = df['date'].dt.is_month_end.astype(int)
    df['is_quarter_start'] = df['date'].dt.is_quarter_start.astype(int)
    df['is_quarter_end'] = df['date'].dt.is_quarter_end.astype(int)
    df['is_year_start'] = df['date'].dt.is_year_start.astype(int)
    df['is_year_end'] = df['date'].dt.is_year_end.astype(int)

    # Season encoding: 0 = Dec-Feb, 1 = Mar-May (default), 2 = Jun-Aug,
    # 3 = Sep-Nov.
    df["season"] = np.where(df.month.isin([12, 1, 2]), 0, 1)
    df["season"] = np.where(df.month.isin([6, 7, 8]), 2, df["season"])
    # NOTE(review): pd.Series(...) here rebuilds with a default 0..n-1 index;
    # safe for the single-row frame this app passes, but would misalign a
    # frame with a non-default index.
    df["season"] = pd.Series(np.where(df.month.isin([9, 10, 11]), 3, df["season"])).astype("int8")
    df['pay_day'] = np.where((df['day_of_month'] == 15) | (df['is_month_end'] == 1), 1, 0)
    df['earthquake_impact'] = np.where(df['date'].isin(
        pd.date_range(start='2016-04-16', end='2016-12-31', freq='D')), 1, 0)

    return df
192
# Engineer the calendar features, then drop the raw `date` column the model
# was not trained on.
input_df = getDateFeatures(input_df)
input_df = input_df.drop(columns=['date'], axis=1)

# ---- Make the prediction and show the result ----
if st.button('Predict'):
    prediction = predict_sales(input_df.values, input_df)
    predicted_value = round(prediction[0], 2)
    st.success('The predicted sales amount is $' + str(predicted_value))
199
+
200
+
201
# ---- Footer: acknowledgements expander ----
footer = st.expander("**Subsequent Information**")
with footer:
    # Inside the expander context, bare st.* calls render into `footer`.
    if st.button("Special Thanks"):
        st.markdown("*We want to express our appreciation and gratitude to Emmanuel,Racheal, Mavies and Richard for their great insights and contributions!*")
beautiful image.png ADDED
encoder.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83eee99c982dfe47d4a410eaae318efae7b927a6b1bcbbefffaae7693c854556
3
+ size 10242
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ pandas==1.5.3
2
+ numpy==1.24.2
3
+ scikit-learn==1.2.2
4
+ pytest
5
+ category_encoders
scaler.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b21ec3396db4610a47906f21c3c5c8e567f633fbd96c2725cdaf3606c5c79716
3
+ size 1370