astacn committed on
Commit
c83db7b
·
verified ·
1 Parent(s): af93c55

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +115 -3
app.py CHANGED
@@ -1,12 +1,124 @@
1
  from flask import Flask, request, jsonify
2
- from sklearn.preprocessing import MinMaxScaler
3
  import pandas as pd
 
 
 
 
 
 
 
 
 
 
4
  import os
5
- from app2 import predict_stock_codes
6
 
7
  app = Flask(__name__)
8
 
9
- # Load the prediction model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  model = CustomModel()
11
 
12
  # Define a function to prepare the data for prediction
 
1
  from flask import Flask, request, jsonify
 
2
  import pandas as pd
3
+ import numpy as np
4
+ import baostock as bs
5
+ from sklearn.preprocessing import MinMaxScaler, StandardScaler
6
+ from sklearn.model_selection import train_test_split
7
+ from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, mean_absolute_error
8
+ from neuralprophet import NeuralProphet, set_log_level
9
+ from torch.optim import Adam
10
+ from torch.nn import LSTM
11
+ import torch
12
+ import torch.nn as nn
13
  import os
14
+
15
 
app = Flask(__name__)

# Set NeuralProphet's log level to ERROR to suppress unnecessary warnings.
set_log_level("ERROR")

# Log in to the Baostock data service.
# NOTE(review): the login result is not checked; a failed login surfaces later
# as an empty query result -- consider inspecting the returned error_code.
bs.login()

# Collect daily historical K-line data.
# NOTE(review): "sz.000001" is a Shenzhen-listed code (Ping An Bank); the
# Shanghai Composite Index would be "sh.000001" -- confirm which was intended.
data = bs.query_history_k_data_plus(
    "sz.000001",
    "date,open,high,low,close,volume",
    start_date="2005-05-30",
    end_date="2024-01-31",
    frequency="d"
)

# Convert the Baostock ResultData cursor into a pandas DataFrame.
data_list = []
# Use `and` so data.next() is only invoked while the query reports success;
# the original `&` evaluated both operands unconditionally (no short-circuit).
while (data.error_code == '0') and data.next():
    # Fetch one record at a time and accumulate the rows.
    data_list.append(data.get_row_data())
data_df = pd.DataFrame(data_list, columns=data.fields)

# Baostock returns every field as a string; convert the prices to numeric.
data_df['open'] = pd.to_numeric(data_df['open'])
data_df['close'] = pd.to_numeric(data_df['close'])

# Filter out stocks that meet the conditions (currently disabled).
# data_df = data_df[(data_df["open"] >= 0.98 * data_df["close"].shift(1).fillna(0)) & (data_df["open"] <= 1.02 * data_df["close"].shift(1).fillna(0))]
# data_df = data_df[(data_df["high"] == data_df["close"]) & (data_df["low"] == data_df["close"])] # limit-up condition
# data_df = data_df[(data_df["open"]!= 0) & (data_df["close"]!= 0)] # exclude zero prices

# Guard against an empty frame before splitting.
if data_df.empty:
    print("Warning: data_df is empty after filtering. Check your filtering conditions.")
else:
    # NOTE(review): a random split leaks future rows into the training set for
    # a time series; a chronological split is usually preferred -- confirm.
    train_data, val_data = train_test_split(data_df, test_size=0.2, random_state=42)
# Define the custom model
class CustomModel(nn.Module):
    """Hybrid model wrapping a NeuralProphet forecaster and an LSTM layer.

    NOTE(review): forward() calls the NeuralProphet object directly on its
    input and feeds the result to nn.LSTM, whose output is an
    (output, (h_n, c_n)) tuple. NeuralProphet is normally fit/queried via
    DataFrames rather than tensor calls, so this data flow looks unlikely to
    run as written -- confirm the intended composition. predict() below uses
    only the NeuralProphet component.
    """

    def __init__(self):
        super(CustomModel, self).__init__()
        # One-step-ahead forecaster conditioned on the previous 30 observations.
        self.neural_prophet = NeuralProphet(
            n_forecasts=1,
            n_lags=30,
            n_changepoints=10,
            changepoints_range=0.8,
            learning_rate=1e-3,
            optimizer=Adam,  # NOTE(review): NeuralProphet typically takes an optimizer *name* -- verify it accepts a torch class
        )
        # Single-layer LSTM over scalar inputs; batch dimension comes first.
        self.lstm = LSTM(input_size=1, hidden_size=128, num_layers=1, batch_first=True)

    def forward(self, x):
        # See the class-level NOTE: these two calls are unlikely to compose.
        x = self.neural_prophet(x)
        x = self.lstm(x)
        return x

    def predict(self, df):
        """
        Custom predict method for CustomModel. Utilizes NeuralProphet's prediction.

        Args:
            df: The input DataFrame for prediction.

        Returns:
            Predictions from the NeuralProphet model.
        """
        # Assuming your NeuralProphet model expects a DataFrame in a specific format
        # You might need to adjust this based on your data and model setup
        future = self.neural_prophet.make_future_dataframe(df, periods=1) # Adjust periods as needed
        forecast = self.neural_prophet.predict(future)
        return forecast['yhat1'].values # Or access the relevant prediction column
# Instantiate the model
model = CustomModel()

# Define loss function and optimizer
# NOTE(review): BCELoss expects probabilities in [0, 1] with matching targets;
# CustomModel.forward() as written applies no sigmoid -- confirm before training.
criterion = nn.BCELoss()
optimizer = Adam(model.parameters(), lr=1e-3)
# Training loop
def fit(model, train_data, epochs, batch_size, validation_data,
        criterion=None, optimizer=None):
    """Custom training loop for the CustomModel.

    Args:
        model: The model (an nn.Module) to train.
        train_data: Iterable of (data, target) batches, e.g. a DataLoader.
        epochs: Number of passes over ``train_data``.
        batch_size: Unused; batching is expected to be done by ``train_data``.
        validation_data: Unused; no validation pass is performed yet.
        criterion: Loss function; defaults to the module-level ``criterion``.
        optimizer: Optimizer; defaults to the module-level ``optimizer``.
    """
    # Fall back to the module-level globals the original loop relied on;
    # accepting them as parameters keeps the loop testable in isolation.
    if criterion is None:
        criterion = globals()["criterion"]
    if optimizer is None:
        optimizer = globals()["optimizer"]

    for epoch in range(epochs):
        model.train()  # enable training-mode behavior (dropout, batch norm)
        for batch_idx, (data, target) in enumerate(train_data):
            optimizer.zero_grad()             # clear gradients from the previous step
            output = model(data)              # forward pass
            loss = criterion(output, target)  # calculate loss
            loss.backward()                   # backpropagate gradients
            optimizer.step()                  # update model parameters

            # Report training progress every 100 batches. (The original line
            # here read "pass Load the prediction model" -- a merge artifact
            # and a syntax error.)
            if batch_idx % 100 == 0:
                print(f"epoch {epoch} batch {batch_idx}: loss {loss.item():.6f}")
122
  model = CustomModel()
123
 
124
  # Define a function to prepare the data for prediction