|
|
|
|
|
import gradio as gr |
|
|
from transformers import pipeline |
|
|
import torch |
|
|
|
|
|
|
|
|
MODEL_NAME = "koheiduck/bert-japanese-finetuned-sentiment" |
|
|
sentiment_analyzer = None |
|
|
|
|
|
def load_sentiment_analyzer():
    """Lazily build and cache the HuggingFace sentiment-analysis pipeline.

    Returns:
        The module-level ``sentiment_analyzer`` pipeline, loading it on
        first call. Returns ``None`` when loading fails (the error is
        printed; callers are expected to check for ``None``).
    """
    global sentiment_analyzer

    # Already loaded on a previous call — reuse the cached pipeline.
    if sentiment_analyzer is not None:
        return sentiment_analyzer

    print(f"Loading sentiment analysis model: {MODEL_NAME}...")
    try:
        # Device index 0 selects the first CUDA GPU; -1 selects CPU.
        device = 0 if torch.cuda.is_available() else -1
        sentiment_analyzer = pipeline(
            "sentiment-analysis",
            model=MODEL_NAME,
            tokenizer=MODEL_NAME,
            device=device,
        )
        print(f"Model {MODEL_NAME} loaded successfully on {'GPU' if device == 0 else 'CPU'}.")
    except Exception as e:
        # Best-effort: report the failure and fall through, leaving the
        # cache as None so the caller can surface an error message.
        print(f"Error loading model {MODEL_NAME}: {e}")

    return sentiment_analyzer
|
|
|
|
|
|
|
|
def analyze_sentiment_gradio(text_input):
    """Run Japanese sentiment analysis on *text_input* for the Gradio UI.

    Args:
        text_input: Text entered by the user in the Gradio textbox; may be
            empty or None.

    Returns:
        A human-readable Japanese result string — "感情: <label> (スコア: …)"
        on success, otherwise an error/prompt message. Never raises; all
        failures are converted to message strings for display in the UI.
    """
    # Validate the input BEFORE touching the model, so an empty submission
    # cannot trigger a (potentially slow) model load.
    if not text_input:
        return "テキストを入力してください。"

    analyzer = load_sentiment_analyzer()
    if analyzer is None:
        return "エラー: モデルのロードに失敗しました。"

    try:
        print(f"Analyzing sentiment for: {text_input}")
        result = analyzer(text_input)

        # The pipeline returns a list like [{'label': ..., 'score': ...}];
        # a truthy non-empty list is the success case.
        if result and isinstance(result, list) and len(result) > 0:
            label = result[0]['label']
            score = result[0]['score']
            return f"感情: {label} (スコア: {score:.4f})"
        else:
            return "分析結果の取得に失敗しました。"
    except Exception as e:
        # Surface the failure to the user rather than crashing the UI.
        print(f"Error during sentiment analysis: {e}")
        return f"エラーが発生しました: {str(e)}"
|
|
|
|
|
|
|
|
|
|
|
# Eagerly load the model at startup so the first user request is fast
# (preserves the original behavior of loading at import time).
load_sentiment_analyzer()

# Gradio UI: one free-text input box, one read-only text output box.
iface = gr.Interface(
    fn=analyze_sentiment_gradio,
    inputs=gr.Textbox(lines=5, placeholder="ここに日本語の文章を入力してください...", label="分析したい文章"),
    outputs=gr.Textbox(label="感情分析結果"),
    title="日本語 感情分析デモ",
    description=f"'{MODEL_NAME}' モデルを使用した感情分析デモです。文章を入力して「Submit」を押してください。",
)

# Guard the launch so importing this module (e.g. for testing or reuse of
# analyze_sentiment_gradio) does not start the web server.
if __name__ == "__main__":
    iface.launch()