Preethamreddy799 committed on
Commit
b99bcad
·
1 Parent(s): 0f99604

new update

Browse files
Files changed (1) hide show
  1. app.py +27 -10
app.py CHANGED
@@ -1,11 +1,12 @@
1
  import streamlit as st
2
- import pickle
3
  import numpy as np
4
- from huggingface_hub import hf_hub_download
5
- import os
6
  from tensorflow.keras.models import load_model
 
7
 
8
- def load_model_test_steps():
 
9
  repo_id = 'Preethamreddy799/NLP_MODEL'
10
  filename = 'model_test_steps.h5' # Assuming the model is in HDF5 format
11
 
@@ -18,13 +19,29 @@ def load_model_test_steps():
18
  print(f"Model loaded successfully from {cached_model_path}")
19
  return model
20
 
21
- model = load_model_test_steps()
22
 
23
- # Function to generate predictions
24
- def generate_test_steps(acceptance_criteria, test_data):
25
- # Example input preprocessing (tokenization/embedding logic should match training)
26
- input_features = np.array([len(acceptance_criteria), len(test_data)]).reshape(1, -1)
 
 
 
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  # Generate prediction
29
  predicted_steps = model.predict(input_features)
30
  return predicted_steps
@@ -50,4 +67,4 @@ if st.button("Generate Test Steps"):
50
  else:
51
  st.error("Model not loaded. Please check the model repository and file.")
52
  else:
53
- st.warning("Please fill in both Acceptance Criteria and Test Data.")
 
1
  import streamlit as st
 
2
  import numpy as np
3
+ from tensorflow.keras.preprocessing.text import Tokenizer
4
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
5
  from tensorflow.keras.models import load_model
6
+ from huggingface_hub import hf_hub_download
7
 
8
+ # Load model from Hugging Face
9
+ def load_model():
10
  repo_id = 'Preethamreddy799/NLP_MODEL'
11
  filename = 'model_test_steps.h5' # Assuming the model is in HDF5 format
12
 
 
19
  print(f"Model loaded successfully from {cached_model_path}")
20
  return model
21
 
22
+ model = load_model()
23
 
24
+ # Initialize Tokenizer (Should match training tokenizer)
25
+ tokenizer = Tokenizer(num_words=1000)
26
+
27
+ # Function to preprocess text data
28
def preprocess_text(input_text):
    """Turn raw text into a fixed-length integer feature array for the model.

    NOTE(review): the tokenizer is fitted on this single inference input,
    so the resulting word indices will NOT match the vocabulary the model
    was trained with. The tokenizer fitted during training should be saved
    (e.g. pickled) and reloaded here instead — confirm with the training
    pipeline.

    Returns a 2-D array of shape (1, 100) suitable for `model.predict`.
    """
    # Build a vocabulary from this one input (mirrors the original logic).
    tokenizer.fit_on_texts([input_text])

    # Map the text to its sequence of integer word ids.
    encoded = tokenizer.texts_to_sequences([input_text])

    # Pad/truncate to the 100-token window the model expects.
    return pad_sequences(encoded, maxlen=100)
39
+
40
+ # Function to generate test steps
41
def generate_test_steps(acceptance_criteria, test_data):
    """Predict test steps for the given acceptance criteria and test data.

    Both inputs are joined into a single string, converted into model
    features via `preprocess_text`, and passed to the globally loaded
    Keras model. Returns the raw prediction array from `model.predict`.
    """
    combined = f"{acceptance_criteria} {test_data}"
    features = preprocess_text(combined)
    # Run inference and hand back the untouched model output.
    return model.predict(features)
 
67
  else:
68
  st.error("Model not loaded. Please check the model repository and file.")
69
  else:
70
+ st.warning("Please fill in both Acceptance Criteria and Test Data.")