Hugging Face Space: RAGFiller — BERT classifier that picks a "thinking filler" phrase for a given input text. (Space status at capture time: sleeping.)
# Spaces app: map input text to one of 30 canned "thinking filler"
# phrases via a fine-tuned BERT sequence classifier.
import gradio as gr
import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Fetch the fine-tuned tokenizer and the 30-way classification head
# from the Hub. NOTE(review): this downloads weights at import time.
tokenizer = BertTokenizer.from_pretrained('Non-playing-Character/RAGFiller')
model = BertForSequenceClassification.from_pretrained(
    'Non-playing-Character/RAGFiller', num_labels=30
)

# Human-readable phrase for each model output class, indexed by class id.
labels = [
    "That's an interesting question... let me see.",
    "Hmm, I need to consider that for a moment.",
    "Let me think about how best to address that.",
    "Well, I think it really depends on a few factors...",
    "Good thought! I need a moment to process that.",
    "You know, I've never really thought about it that way before.",
    "Okay, let me break that down for a second.",
    "That's a tough one... give me a second to gather my thoughts.",
    "I want to make sure I give you the right answer, so let me think.",
    "Let me reflect on that... there are a few angles to consider.",
    "Alright, if I remember correctly, it goes something like this...",
    "That's a good point, and I think the answer is...",
    "Good question! Let me take a moment to unpack that.",
    "Hmm, there's a lot to consider here. Give me a second.",
    "Let me think about that... it's not a straightforward answer.",
    "Interesting... I need to gather my thoughts on this.",
    "Well, let me consider the various aspects before I answer.",
    "Alright, let's break this down a bit before I answer.",
    "Good thought! I want to make sure I address it properly.",
    "Hmm, let's delve into that a bit more; I'll need a moment.",
    "Great question! I want to provide a thoughtful response, so let me think.",
    "That's a fascinating angle... let me think it through.",
    "I'll need a moment to come up with an answer.",
    "I'll take a quick moment to weigh my options.",
    "I appreciate the question; let me think it through.",
    "Let me take a step back and think that over.",
    "Let me mull that over for just a moment.",
    "I want to consider that carefully; let me pause for a second.",
    "Let's explore that further; I need a moment to think.",
    "I'd like to think that over before answering.",
]
def classify_text(text):
    """Classify *text* into one of the 30 filler-phrase labels.

    Parameters
    ----------
    text : str
        Raw input text; tokenized (padded/truncated) by the module-level
        BERT tokenizer.

    Returns
    -------
    dict
        Single-entry mapping ``{best_label: probability}`` with a plain
        Python ``float`` probability, as expected by gradio's "label"
        output component.
    """
    inputs = tokenizer(text, padding=True, truncation=True, return_tensors='pt')
    # Inference only: disable gradient tracking so no autograd graph is
    # built (the original ran the forward pass with gradients enabled).
    with torch.no_grad():
        outputs = model(**inputs)
    # Class probabilities for the single input in the batch.
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)[0]
    # The original also built a full {label: prob} dict that was never
    # used; that dead computation is dropped here.
    best_index = int(probabilities.argmax())
    # float() makes the value JSON-serializable (plain Python float
    # rather than a tensor/numpy scalar).
    return {labels[best_index]: float(probabilities[best_index])}
# Wire the classifier into a simple web UI: one text box in,
# gradio's "label" component out.
gradio_app = gr.Interface(
    fn=classify_text,
    inputs="text",
    outputs="label",
)
# Start serving (blocking; Spaces keeps this process alive).
gradio_app.launch()