File size: 2,566 Bytes
2d33220
 
 
 
 
 
 
3bea204
2d33220
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import os
import re

import gradio as gr
import pandas as pd
from googleapiclient.discovery import build
from transformers import pipeline

# YouTube Data API key, read from the environment instead of being hardcoded.
# NOTE(review): the key previously committed here was exposed in source control
# and should be revoked. Set YOUTUBE_API_KEY in the environment before launch.
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", "")

# Hugging Face sentiment-analysis pipeline; downloads the default model on
# first use, so this line does network I/O at import time.
sentiment_pipeline = pipeline("sentiment-analysis")

# Extract the video ID from a YouTube URL.
def extract_video_id(url):
    """Return the video ID embedded in *url*, or None if none is found.

    Recognizes watch, youtu.be, embed, and shorts URL forms.
    """
    id_patterns = (
        r"(?:youtube\.com\/watch\?v=|youtu\.be\/|youtube\.com\/embed\/)([^&\n?#]+)",
        r"youtube\.com\/shorts\/([^&\n?#]+)",
    )
    hits = (re.search(p, url) for p in id_patterns)
    return next((m.group(1) for m in hits if m), None)

# Function to fetch comments using YouTube API
def fetch_comments(video_url, max_results=10):
    """Fetch top-level comments for a YouTube video.

    Parameters:
        video_url: any supported YouTube URL form (watch/youtu.be/embed/shorts).
        max_results: maximum number of comment threads to request
            (the API caps this at 100 per page).

    Returns:
        DataFrame with a "Comment" column on success, or a single-row
        DataFrame with an "error" column describing the failure.
    """
    video_id = extract_video_id(video_url)
    if not video_id:
        return pd.DataFrame({"error": ["Invalid YouTube URL"]})

    try:
        # build() and execute() can both fail (bad/missing key, network error,
        # comments disabled on the video). Keep them inside the try so the UI
        # receives an error row instead of an unhandled traceback.
        youtube = build("youtube", "v3", developerKey=YOUTUBE_API_KEY)
        response = youtube.commentThreads().list(
            part="snippet",
            videoId=video_id,
            maxResults=max_results,
            textFormat="plainText",
        ).execute()
        # .get() guards against a response with no "items" key.
        comments = [
            item["snippet"]["topLevelComment"]["snippet"]["textDisplay"]
            for item in response.get("items", [])
        ]
        return pd.DataFrame({"Comment": comments})
    except Exception as e:
        # Surface API errors to the caller as data, matching the
        # invalid-URL error shape above.
        return pd.DataFrame({"error": [str(e)]})

# Main analysis function
def analyze_video(video_url, max_comments=10):
    """Fetch comments for *video_url* and score each one's sentiment.

    Returns a DataFrame with Comment/Sentiment/Score columns, or a plain
    string describing the error when fetching failed.
    """
    df = fetch_comments(video_url, max_comments)
    if "error" in df.columns:
        return df.to_string(index=False)

    # Truncate to 512 chars per comment to stay within the model's input
    # limit, then run ONE batched pipeline call instead of a per-comment
    # Python loop (same outputs, single forward pass).
    texts = [comment[:512] for comment in df["Comment"]]
    preds = sentiment_pipeline(texts) if texts else []  # guard empty batch
    results = [
        {
            "Comment": comment,
            "Sentiment": pred["label"],
            "Score": round(pred["score"], 3),
        }
        for comment, pred in zip(df["Comment"], preds)
    ]
    return pd.DataFrame(results)

# Gradio UI: one URL box + comment-count slider feeding the analyzer,
# results rendered as a read-only table.
with gr.Blocks(title="YouTube Comment Sentiment Analyzer") as demo:
    gr.Markdown("# ๐Ÿ“Š YouTube Comment Sentiment Analyzer")

    url_box = gr.Textbox(label="๐Ÿ“บ YouTube Video URL", placeholder="Paste the video link here")
    count_slider = gr.Slider(1, 100, value=10, step=1, label="Number of Comments")
    analyze_btn = gr.Button("Analyze")
    result_table = gr.Dataframe(label="Sentiment Analysis Result", interactive=False)

    # Wire the button to the analysis function.
    analyze_btn.click(fn=analyze_video, inputs=[url_box, count_slider], outputs=result_table)

demo.launch()