# Streamlit demo app: text classification with a Hugging Face model.
import numpy as np
import streamlit as st
import torch
import transformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# --- Model setup -------------------------------------------------------
# Hugging Face checkpoint shared by the tokenizer and the classifier;
# the classification head is sized for a binary (2-label) task.
model_name = "bert-base-uncased"
num_labels = 2

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(
    model_name, num_labels=num_labels
)
def classify_text(text):
    """Classify *text* with the sequence-classification model.

    Parameters
    ----------
    text : str
        Raw input string to classify.

    Returns
    -------
    list[str]
        One predicted label per input sequence ("positive" or "negative").

    NOTE(review): no fine-tuned checkpoint is loaded — the classification
    head of ``bert-base-uncased`` is randomly initialised, so the label
    mapping below is arbitrary until the model is fine-tuned.
    """
    # Tokenize; "pt" asks for PyTorch tensors for the model forward pass.
    encoded_text = tokenizer(text, truncation=True, padding=True, return_tensors="pt")

    # Inference only: disable gradient tracking to save memory and time.
    with torch.no_grad():
        outputs = model(**encoded_text)
        logits = outputs.logits

    # Fix: convert the torch.Tensor to a NumPy array explicitly before
    # np.argmax — the original passed the tensor directly, which only
    # works incidentally for CPU tensors.
    predictions = np.argmax(logits.detach().cpu().numpy(), axis=1)

    # Map class indices to human-readable labels.
    class_labels = ["positive", "negative"]
    return [class_labels[i] for i in predictions]
# --- Streamlit UI ------------------------------------------------------
st.title("Text Classification Demo")

# Free-text field; defaults to an empty string until the user types.
input_text = st.text_input("Enter text to classify:", "")

# Run the model only once the user has entered something non-empty.
if input_text:
    st.write("Predicted labels:", classify_text(input_text))