Spaces:
Runtime error
Runtime error
import os

import gradio as gr
import pandas as pd
import torch
from datasets import Dataset
from huggingface_hub import HfApi, Repository
from transformers import DistilBertForSequenceClassification, DistilBertTokenizer
# Step 1: Hugging Face access token.
# Read the secret from the environment instead of hard-coding it in source
# (the original shipped an empty placeholder string, which both leaks the
# pattern of committing tokens and guarantees auth failures on the Hub).
# Falls back to "" so behavior is unchanged when HF_TOKEN is unset.
hf_token = os.getenv("HF_TOKEN", "")  # export HF_TOKEN=hf_... before running

# Client for the Hugging Face Hub API (repo creation, file uploads).
api = HfApi()
# Step 2: Build a small sample support-case dataset.
# Rows are (Name, Email, Case Problem, Feedback, Details) tuples; the
# column-oriented dict expected by pandas is derived by transposing them.
_columns = ("Name", "Email", "Case Problem", "Feedback", "Details")
_rows = [
    ("John Doe", "johndoe@example.com", "Login Issues", "Negative",
     "Unable to login after password reset."),
    ("Jane Smith", "janesmith@example.com", "Payment Failure", "Positive",
     "Payment went through after retrying."),
    ("Mike Johnson", "mikej@example.com", "UI Bug", "Neutral",
     "The interface is a bit confusing at times."),
]
data = {col: list(values) for col, values in zip(_columns, zip(*_rows))}

# Column-oriented DataFrame built from the sample records.
df = pd.DataFrame(data)

# Wrap the DataFrame as a Hugging Face Dataset for upload.
dataset = Dataset.from_pandas(df)
# Step 3: Publish the dataset CSV to the Hugging Face Hub.
repo_id = "SailajaS/case-feedback-dataset"  # Replace with your actual Hugging Face repo name

try:
    # exist_ok=True keeps reruns idempotent instead of raising once the
    # repo already exists.
    api.create_repo(repo_id=repo_id, repo_type="dataset", token=hf_token, exist_ok=True)
    print(f"Successfully created repository: {repo_id}")
except Exception as e:
    # Best-effort: report and continue; the upload below surfaces real
    # auth/permission failures.
    print(f"Error creating repository: {e}")

# Write the CSV locally, then upload it directly through the Hub API.
# This replaces the deprecated git-based `Repository` workflow, which also
# failed here: `clone_from=repo_id` without `repo_type="dataset"` tries to
# clone a *model* repo of that name.
os.makedirs("./dataset_repo", exist_ok=True)
dataset.to_csv("./dataset_repo/dataset.csv")
api.upload_file(
    path_or_fileobj="./dataset_repo/dataset.csv",
    path_in_repo="dataset.csv",
    repo_id=repo_id,
    repo_type="dataset",
    token=hf_token,
)
# Step 4: Load DistilBERT with a 3-way sequence-classification head.
# The base checkpoint defaults to num_labels=2, but the predictor below
# maps over three labels ("Negative", "Positive", "Neutral"), so the head
# must be sized to 3 and given the matching label mapping.
# NOTE(review): this head is randomly initialized — predictions are
# meaningless until the model is fine-tuned on labeled feedback data.
model_name = "distilbert-base-uncased"
feedback_id2label = {0: "Negative", 1: "Positive", 2: "Neutral"}
model = DistilBertForSequenceClassification.from_pretrained(
    model_name,
    num_labels=3,
    id2label=feedback_id2label,
    label2id={label: idx for idx, label in feedback_id2label.items()},
)
tokenizer = DistilBertTokenizer.from_pretrained(model_name)
model.eval()  # inference only: disable dropout
# Step 5: Prediction function mapping free-text case details to a label.
def predict_case_feedback(details):
    """Classify `details` with the module-level DistilBERT model.

    Tokenizes the text (truncated to 512 tokens), runs a no-grad forward
    pass, takes the argmax class index, and maps it onto the fixed label
    order ["Negative", "Positive", "Neutral"].

    Returns a formatted string: "Predicted Feedback: <label>".
    """
    labels = ("Negative", "Positive", "Neutral")
    encoded = tokenizer(
        details,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=512,
    )
    with torch.no_grad():
        class_idx = model(**encoded).logits.argmax(dim=-1).item()
    return f"Predicted Feedback: {labels[class_idx]}"
# Step 6: Wire the predictor into a simple text-in / text-out Gradio UI.
_description = "Enter the case details to predict feedback (Positive, Negative, Neutral)"
interface = gr.Interface(
    predict_case_feedback,  # fn: text in, text out
    inputs="text",
    outputs="text",
    title="Case Feedback Prediction",
    description=_description,
)

# Step 7: Start the web app (blocks until the server is stopped).
interface.launch()