# Hugging Face Space page header (scrape artifact) — Space status: "Sleeping"
# --- Third-party imports ---
import streamlit as st
import numpy as np
from tensorflow.keras.models import load_model
from huggingface_hub import hf_hub_download
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import gensim
import nltk
import os  # NOTE(review): appears unused in this file — confirm before removing

# Download necessary NLTK resources.
# Runs unconditionally at import time, i.e. on every app start-up.
nltk.download('stopwords')
nltk.download('wordnet')

# Load stop words and lemmatizer — module-level globals used by preprocess_text().
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
# Function to preprocess input text
def preprocess_text(input_text, word2vec_model, max_timesteps=100):
    """Encode raw text as a fixed-size array of Word2Vec embeddings.

    The text is lowercased, whitespace-tokenized, stop-word filtered and
    verb-lemmatized; each remaining token is mapped to its Word2Vec vector
    (a zero vector for out-of-vocabulary tokens).  The sequence is then
    truncated or right-padded with zero vectors to exactly ``max_timesteps``
    rows.

    Args:
        input_text: Raw input string.
        word2vec_model: Trained gensim Word2Vec model; only ``.wv`` and
            ``.vector_size`` are used.
        max_timesteps: Fixed sequence length (default 100, the value the
            downstream LSTM was wired for).

    Returns:
        np.ndarray of shape ``(1, max_timesteps, word2vec_model.vector_size)``.
    """
    # Filter stop words on the raw token, then lemmatize — same order as
    # the original pipeline.
    tokens = [
        lemmatizer.lemmatize(token, pos='v')
        for token in input_text.lower().split()
        if token not in stop_words
    ]
    vector_size = word2vec_model.vector_size
    zero_vec = np.zeros(vector_size)  # stand-in for out-of-vocabulary tokens
    embeddings = [
        word2vec_model.wv[token] if token in word2vec_model.wv else zero_vec
        for token in tokens[:max_timesteps]
    ]
    # Right-pad with zero vectors up to the fixed sequence length.
    embeddings.extend([zero_vec] * (max_timesteps - len(embeddings)))
    return np.array(embeddings).reshape((1, max_timesteps, vector_size))
# Load Word2Vec model
def load_word2vec_model():
    """Download the pretrained Word2Vec model from the Hugging Face Hub and load it."""
    model_path = hf_hub_download(
        repo_id='Preethamreddy799/NLP_MODEL',
        filename='word2vec_model.bin',
    )
    return gensim.models.Word2Vec.load(model_path)
# Load LSTM model
def load_model_test_steps():
    """Download the pretrained Keras LSTM (.h5) from the Hugging Face Hub and load it."""
    cached_model_path = hf_hub_download(
        repo_id='Preethamreddy799/NLP_MODEL',
        filename='model_test_steps.h5',
    )
    # compile=False: the model is used for inference only, so its original
    # optimizer/loss configuration does not need to be restored.
    model = load_model(cached_model_path, compile=False)
    print(f"Model loaded successfully from {cached_model_path}")
    return model
# --- Initialize models (runs once at module import / app start-up) ---
word2vec_model = load_word2vec_model()
lstm_model = load_model_test_steps()

# --- Streamlit UI ---
st.title("Test Case Steps Generator")
st.write("This app generates test steps based on Test Case Acceptance Criteria.")

acceptance_criteria = st.text_area("Enter Test Case Acceptance Criteria")

if st.button("Generate Test Steps"):
    if acceptance_criteria:
        features = preprocess_text(acceptance_criteria, word2vec_model)
        print("Original Shape:", features.shape)  # Debugging
        # NOTE(review): only the first timestep is kept, discarding the other
        # 99 rows of the encoded text — presumably to match the deployed
        # model's expected input shape; confirm the LSTM was really trained
        # on single-timestep input.
        features = features[:, :1, :]
        print("Adjusted Shape:", features.shape)  # Debugging
        try:
            predicted_steps = lstm_model.predict(features)
            st.subheader("Generated Test Steps")
            st.write(predicted_steps)
        except Exception as e:
            st.error(f"Error generating predictions: {e}")