import torch
import gradio as gr
from transformers import pipeline, BertTokenizer, BertForQuestionAnswering
from datasets import load_dataset
# Load the depression-advice dataset from the Hugging Face Hub.
advice_dataset = load_dataset("ziq/depression_advice")
# Load the fine-tuned BERT QA model and its tokenizer from a local directory.
# NOTE(review): the app will fail at startup if this directory is missing — confirm
# it is bundled with the deployment.
model_dir = "./bert-finetuned-depression"
model = BertForQuestionAnswering.from_pretrained(model_dir)
tokenizer = BertTokenizer.from_pretrained(model_dir)
# Extract the raw text column from the training split.
# NOTE(review): `contexts` is never read in the visible code — presumably kept for
# future retrieval over the dataset; verify before removing.
contexts = advice_dataset["train"]["text"]
# Generate an extractive answer for a user message with the fine-tuned BERT QA model.
def generate_answer(messages):
    """Return the answer span the QA model predicts for *messages*.

    Args:
        messages: A string, or a list of strings in which case only the
            first element is used (matches the original Gradio wiring).

    Returns:
        The decoded answer text, or "No answer found." when the input is
        empty or the model predicts an empty/invalid span.
    """
    # If messages is a list, use the first message (guard the empty list,
    # which previously raised IndexError).
    if isinstance(messages, list):
        messages = messages[0] if messages else ""
    # Empty/whitespace input cannot yield a meaningful span.
    if not messages or not messages.strip():
        return "No answer found."
    # Truncate to the model's maximum input size: BERT raises a runtime
    # error for sequences longer than its 512 positional embeddings.
    inputs = tokenizer(messages, return_tensors="pt", truncation=True, max_length=512)
    # Inference only — disable gradient tracking.
    with torch.no_grad():
        outputs = model(**inputs)
    # Most likely start/end token indices of the answer span.
    answer_start = torch.argmax(outputs.start_logits)
    answer_end = torch.argmax(outputs.end_logits) + 1
    # An end position at or before the start means no coherent span was found.
    if answer_end <= answer_start:
        return "No answer found."
    # skip_special_tokens prevents returning bare "[CLS]"/"[SEP]" markers.
    answer = tokenizer.decode(
        inputs["input_ids"][0][answer_start:answer_end], skip_special_tokens=True
    ).strip()
    return answer if answer else "No answer found."
# --- Gradio UI --------------------------------------------------------------
# Build the input/output widgets up front, then wire them to generate_answer.
message_box = gr.Textbox(type="text", label="Message")
answer_box = gr.Textbox(type="text", label="Answer")
iface = gr.Interface(
    fn=generate_answer,
    inputs=[message_box],
    outputs=answer_box,
    title="Depression Advice Generator",
    description="Enter your feelings, and get supportive advice generated by a fine-tuned BERT model.",
)
# Start the web app.
iface.launch()