File size: 4,642 Bytes
b8c8b55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
import torch
from transformers import pipeline, set_seed, AutoTokenizer, AutoModelForSequenceClassification
from datasets import load_dataset
import gradio as gr
import requests
from bs4 import BeautifulSoup
import nltk  # fix: needed for nltk.download() below (was missing -> NameError)
from nltk.sentiment import SentimentIntensityAnalyzer
from flair.models import TextClassifier
from flair.data import Sentence
import newspaper  # fix: PyPI package "newspaper3k" installs module "newspaper"

# Model configuration
set_seed(42)  # deterministic text generation across runs
nltk.download('vader_lexicon')  # lexicon required by SentimentIntensityAnalyzer

# Text-generation model
generator = pipeline('text-generation', model='gpt2-xl')  # larger GPT-2 variant

# Sentiment-analysis models (VADER for scores, flair for labels)
sentiment_analyzer = SentimentIntensityAnalyzer()
classifier = TextClassifier.load('en-sentiment')

# Topic-classification model (NLI backbone used for zero-shot classification)
tokenizer_topic = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
model_topic = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")
# Fungsi Helper
def get_trending_topics(platform):
  """Return a list of trending-topic strings for *platform*.

  Only the Twitter branch performs live scraping; the TikTok and
  Instagram branches return static placeholders until platform-specific
  scraping is implemented.

  Returns an empty list if the Twitter request fails.
  """
  if platform == "Twitter":
    # Scrape trending topics from Twitter's trends page.
    url = "https://twitter.com/i/trends"
    try:
      # Fix: timeout + status check so a slow/blocked request can't hang
      # or crash the whole generation pipeline.
      response = requests.get(url, timeout=10)
      response.raise_for_status()
    except requests.RequestException:
      # Network failure or non-2xx response: degrade to "no trends".
      return []
    soup = BeautifulSoup(response.content, "html.parser")
    # NOTE(review): this generated CSS class list is brittle — Twitter's
    # class names change frequently; verify the selector still matches.
    trends = [trend.text.strip() for trend in soup.find_all("div", class_="css-901oao r-1awozwy r-18jsvk2 r-6koalj r-370sk r-a023e6 r-b88u0q r-rjixqe r-bcqeeo r-1udh08x r-3s2u2q r-qvutc0")]
    return trends
  elif platform == "TikTok":
    # TikTok scraping needs a dedicated method (API/automation); placeholder.
    return ["Trending TikTok 1", "Trending TikTok 2"]
  else:  # Instagram
    # Instagram scraping needs a dedicated method; placeholder.
    return ["Trending Instagram 1", "Trending Instagram 2"]

def find_related_trend(topic, trends):
  """Return the subset of *trends* whose predicted sentiment matches *topic*'s.

  Sentiment is predicted with the module-level flair ``classifier``; two
  texts are considered related when their top label values agree.
  """
  def _label_of(text):
    # flair predicts in place and attaches labels to the sentence object.
    sent = Sentence(text)
    classifier.predict(sent)
    return sent.labels[0].value

  target_label = _label_of(topic)
  return [trend for trend in trends if _label_of(trend) == target_label]

def make_clickbait_title(title):
  """Prefix *title* with a clickbait phrase chosen by VADER sentiment.

  The compound score selects the phrase family: > 0.5 positive,
  < -0.5 negative, otherwise neutral. The first phrase of the matching
  family is prepended deterministically.
  """
  title = title.strip()
  sentiment = sentiment_analyzer.polarity_scores(title)

  if sentiment['compound'] > 0.5:
    # Positive -> "Secret...", "Revealed...", etc.
    clickbait_phrases = ["Rahasia ", "Terungkap ", "Hebat! ", "Menakjubkan! "]
  elif sentiment['compound'] < -0.5:
    # Negative -> "Horrifying!", "Controversial!", etc.
    clickbait_phrases = ["Mengerikan! ", "Kontroversial! ", "Awas! ", "Bahaya! "]
  else:
    # Neutral -> "You Won't Believe...", "Buzzing!", etc.
    # Fix: trailing space added so the phrase no longer runs directly
    # into the title (every other phrase already ends with a space).
    clickbait_phrases = ["Kamu Tidak Akan Percaya... ", "Heboh! ", "Viral! ", "Trending! "]

  # Only the first phrase is used; the rest are candidates for a future
  # randomized selection.
  return clickbait_phrases[0] + title

def analyze_content(content):
  """Return ``(sentiment_label, predicted_topic)`` for *content*.

  Sentiment comes from the module-level flair ``classifier``. The topic
  classifier (BART-MNLI) is run but its probabilities are not yet
  interpreted; a placeholder topic string is returned.
  """
  # Sentiment via flair: predict in place, then read the top label.
  sentence = Sentence(content)
  classifier.predict(sentence)
  sentiment = sentence.labels[0]

  # Topic classification. Fix: truncate so inputs longer than the model's
  # max length don't crash, and disable autograd for inference.
  inputs = tokenizer_topic(content, return_tensors="pt", truncation=True)
  with torch.no_grad():
    outputs = model_topic(**inputs)
  topic_probs = torch.softmax(outputs.logits, dim=1)
  # TODO: map topic_probs to a human-readable topic label.
  return sentiment, "Topik yang Diprediksi"

def generate_content(topic, format, tone, platform):
  """Generate platform-targeted content and analyze it.

  Args:
    topic: subject to write about.
    format: content format, e.g. "Teks", "Judul", "Tweet", "Artikel".
      (Shadows the ``format`` builtin; kept for interface compatibility.)
    tone: writing tone, e.g. "Netral", "Provokatif".
    platform: "Twitter", "TikTok", or "Instagram".

  Returns:
    Tuple of (generated content, sentiment label value, predicted topic).
  """
  trending_topics = get_trending_topics(platform)
  related_trends = find_related_trend(topic, trending_topics)

  # Build the generation prompt, anchoring to related trends when found.
  prompt = f"Buat konten {format} tentang {topic} dengan gaya {tone} yang "
  if related_trends:
    prompt += f"berkaitan dengan tren {', '.join(related_trends)}."
  else:
    # Fix: plain string — the original f-string had no placeholders (F541).
    prompt += "berpotensi menjadi viral."

  output = generator(prompt, max_length=500, num_return_sequences=1)
  content = output[0]['generated_text']

  if format == "Judul":
    # NOTE(review): generated_text includes the prompt, so the clickbait
    # prefix is applied to the whole text — confirm this is intended.
    content = make_clickbait_title(content)

  sentiment, predicted_topic = analyze_content(content)
  return content, sentiment.value, predicted_topic

# Gradio interface: wires generate_content to a simple web form with one
# text input and three dropdowns, returning three text outputs.
iface = gr.Interface(
    fn=generate_content,
    inputs=[
        gr.Textbox(lines=2, placeholder="Masukkan topik..."),
        gr.Dropdown(["Teks", "Judul", "Tweet", "Artikel"], label="Format Konten"),
        gr.Dropdown(["Netral", "Provokatif", "Humor", "Marah", "Sedih"], label="Tone"),
        gr.Dropdown(["Twitter", "TikTok", "Instagram"], label="Platform")
    ],
    outputs=[
        "text",  # generated content
        "text",  # sentiment label value
        "text"   # predicted topic (currently a placeholder)
    ],
    title="Pembuat Konten Viral (At Any Cost)",
    description="Hasilkan konten yang dirancang untuk menjadi viral (Eksperimental)."
)

iface.launch(share=True)  # share=True publishes a temporary public link