ositamiles committed on
Commit
91cf555
·
verified ·
1 Parent(s): 9cacbcb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -6,7 +6,7 @@ from sklearn.preprocessing import LabelEncoder, StandardScaler
6
  from sklearn.model_selection import train_test_split
7
  import tensorflow as tf
8
  from tensorflow.keras.models import Sequential, Model
9
- from tensorflow.keras.layers import LSTM, Dense, Input, MultiHeadAttention, LayerNormalization
10
  from tensorflow.keras.optimizers import Adam
11
  import joblib
12
  import os
@@ -57,6 +57,13 @@ def create_lstm_model(input_shape):
57
  return model
58
 
59
  # Transformer Model
 
 
 
 
 
 
 
60
  def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
61
  x = MultiHeadAttention(key_dim=head_size, num_heads=num_heads, dropout=dropout)(inputs, inputs)
62
  x = LayerNormalization(epsilon=1e-6)(x)
@@ -65,13 +72,6 @@ def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
65
  x = Dense(inputs.shape[-1])(x)
66
  return LayerNormalization(epsilon=1e-6)(x + res)
67
 
68
- def create_transformer_model(input_shape):
69
- inputs = Input(shape=input_shape)
70
- x = transformer_encoder(inputs, head_size=256, num_heads=4, ff_dim=4, dropout=0.1)
71
- x = GlobalAveragePooling1D()(x)
72
- outputs = Dense(1)(x)
73
- return Model(inputs, outputs)
74
-
75
  # RL Environment
76
  class PricingEnv(gym.Env):
77
  def __init__(self, data):
 
6
  from sklearn.model_selection import train_test_split
7
  import tensorflow as tf
8
  from tensorflow.keras.models import Sequential, Model
9
+ from tensorflow.keras.layers import LSTM, Dense, Input, MultiHeadAttention, LayerNormalization, GlobalAveragePooling1D
10
  from tensorflow.keras.optimizers import Adam
11
  import joblib
12
  import os
 
57
  return model
58
 
59
  # Transformer Model
60
def create_transformer_model(input_shape):
    """Build a single-block Transformer regression model.

    Args:
        input_shape: Shape of one input sample, excluding the batch axis.

    Returns:
        An uncompiled Keras ``Model`` mapping the input sequence to a
        single scalar prediction.
    """
    model_input = Input(shape=input_shape)
    # One encoder block; hyperparameters match the training setup elsewhere
    # in this file.
    encoded = transformer_encoder(
        model_input, head_size=256, num_heads=4, ff_dim=4, dropout=0.1
    )
    # Collapse the sequence axis before the regression head.
    pooled = GlobalAveragePooling1D()(encoded)
    prediction = Dense(1)(pooled)
    return Model(model_input, prediction)
66
+
67
  def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
68
  x = MultiHeadAttention(key_dim=head_size, num_heads=num_heads, dropout=dropout)(inputs, inputs)
69
  x = LayerNormalization(epsilon=1e-6)(x)
 
72
  x = Dense(inputs.shape[-1])(x)
73
  return LayerNormalization(epsilon=1e-6)(x + res)
74
 
 
 
 
 
 
 
 
75
  # RL Environment
76
  class PricingEnv(gym.Env):
77
  def __init__(self, data):