AlgoX committed on
Commit
9427fbb
·
1 Parent(s): 179a0c4
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -680,7 +680,7 @@ models = {}
680
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
681
 
682
  # Load Hawk model
683
- hawk_config = load_model_config("hawk")
684
  input_size_hawk = get_input_size(hawk_config)
685
  hawk_model = HawkPredictor(
686
  input_size=input_size_hawk,
@@ -695,7 +695,7 @@ hawk_model.eval()
695
  models["hawk"] = hawk_model
696
 
697
  # Load Mamba2 model
698
- mamba_config = load_model_config("mamba")
699
  input_size_mamba = get_input_size(mamba_config)
700
  mamba_model = Mamba2Predictor(
701
  input_size=input_size_mamba,
@@ -713,7 +713,7 @@ mamba_model.eval()
713
  models["mamba"] = mamba_model
714
 
715
  # Load xLSTM model
716
- xlstm_config = load_model_config("xlstm")
717
  input_size_xlstm = get_input_size(xlstm_config)
718
  xlstm_model = xLSTMPredictor(
719
  input_size=input_size_xlstm,
 
680
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
681
 
682
  # Load Hawk model
683
+ hawk_config = load_model_config("hawk", models_dir="deployment/models")
684
  input_size_hawk = get_input_size(hawk_config)
685
  hawk_model = HawkPredictor(
686
  input_size=input_size_hawk,
 
695
  models["hawk"] = hawk_model
696
 
697
  # Load Mamba2 model
698
+ mamba_config = load_model_config("mamba", models_dir="deployment/models")
699
  input_size_mamba = get_input_size(mamba_config)
700
  mamba_model = Mamba2Predictor(
701
  input_size=input_size_mamba,
 
713
  models["mamba"] = mamba_model
714
 
715
  # Load xLSTM model
716
+ xlstm_config = load_model_config("xlstm", models_dir="deployment/models")
717
  input_size_xlstm = get_input_size(xlstm_config)
718
  xlstm_model = xLSTMPredictor(
719
  input_size=input_size_xlstm,