Elhanan08 committed on
Commit
6cfe6b8
·
verified ·
1 Parent(s): 5b8e936

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +121 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,123 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
  import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- """
7
- # Welcome to Streamlit!
8
-
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
12
-
13
- In the meantime, below is an example of what you can do with just a few lines of code:
14
- """
15
-
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
 
 
 
1
  import streamlit as st
2
+ import pandas as pd
3
+ import matplotlib.pyplot as plt
4
+ from wordcloud import WordCloud
5
+ import requests
6
+ import re
7
+ import torch
8
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
9
+ from scipy.special import softmax
10
+
11
# ---------------------- Initial setup ----------------------
# Must be the first Streamlit call in the script run.
st.set_page_config(layout="wide", page_title="Analisis Opini Twitter Indonesia")
14
# ---------------------- Model loading ----------------------
@st.cache_resource
def load_model():
    """Load and cache the IndoBERT sentiment tokenizer and classifier.

    Cached with st.cache_resource so the (large) model is downloaded and
    built once per server process instead of on every script rerun.

    Returns:
        (tokenizer, model): a HuggingFace tokenizer and sequence
        classification model, with the model switched to eval mode.
    """
    # Fix: load the tokenizer from the SAME fine-tuned checkpoint as the
    # model. The original paired the base "indobenchmark/indobert-base-p1"
    # tokenizer with the mdhugol fine-tuned weights; mixing checkpoints
    # risks vocab/special-token drift between tokenizer and model.
    checkpoint = "mdhugol/indonesia-bert-sentiment-classification"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
    model.eval()  # inference only: disables dropout
    return tokenizer, model
20
+
21
+ tokenizer, model = load_model()
22
+ label_map = {0: 'Negatif', 1: 'Netral', 2: 'Positif'}
23
+
24
+ # Header
25
+ st.title("🇮🇩 Dashboard Analisis Sentimen X")
26
+ st.markdown("""
27
+ Aplikasi ini menggunakan model IndoBERT untuk menganalisis sentimen tweet berbahasa Indonesia dari Twitter atau file CSV.
28
+ """)
29
+
30
+ # Sidebar
31
+ st.sidebar.header("Konfigurasi")
32
+ twitter_auth_token = st.sidebar.text_input("Twitter Bearer Token:", type="password")
33
+ mode = st.sidebar.radio("Pilih Mode Data:", ["Ambil dari Twitter", "Upload File CSV"])
34
+
35
# ---------------------- Analysis ----------------------
@st.cache_data
def analisis_sentimen_indober(df):
    """Classify the sentiment of every row in df['content'] with IndoBERT.

    Args:
        df: DataFrame with a 'content' column of tweet texts.

    Returns:
        A copy of df with an added 'label' column holding one of the
        label_map values.
    """
    # Fix: work on a copy. The original mutated the argument that
    # st.cache_data hashed, which both surprises callers and can corrupt
    # the cached entry.
    df = df.copy()
    results = []
    for text in df['content']:
        # str(...) guards against NaN/float cells, which the tokenizer rejects.
        inputs = tokenizer(str(text), return_tensors="pt", truncation=True, padding=True)
        with torch.no_grad():
            logits = model(**inputs).logits
        probs = softmax(logits.numpy()[0])
        results.append(label_map[int(probs.argmax())])
    df['label'] = results
    return df
48
+
49
+ def tampilkan_hasil(df):
50
+ st.write("### Contoh komentar", df.head())
51
+ st.write("### Distribusi Sentimen")
52
+ st.bar_chart(df['label'].value_counts())
53
+
54
+ st.write("### WordCloud")
55
+ all_text = " ".join(df['content'])
56
+ wordcloud = WordCloud(width=800, height=400, background_color='white').generate(all_text)
57
+ fig, ax = plt.subplots()
58
+ ax.imshow(wordcloud, interpolation='bilinear')
59
+ ax.axis('off')
60
+ st.pyplot(fig)
61
+
62
+ csv = df.to_csv(index=False).encode('utf-8')
63
+ st.download_button(
64
+ label="📥 Download Hasil CSV",
65
+ data=csv,
66
+ file_name='hasil_sentimen_indonesia.csv',
67
+ mime='text/csv',
68
+ )
69
+
70
+ # Mode Twitter API
71
+ if mode == "Ambil dari Twitter":
72
+ keyword = st.text_input("Masukkan Kata Kunci:")
73
+ jumlah = st.slider("Jumlah Tweet:", 10, 100, 30)
74
+
75
+ if st.button("Ambil dan Analisis Tweet"):
76
+ if not twitter_auth_token:
77
+ st.error("Silakan masukkan Bearer Token.")
78
+ elif not keyword:
79
+ st.warning("Masukkan kata kunci terlebih dahulu.")
80
+ elif any(ord(c) > 127 for c in keyword):
81
+ st.error("Kata kunci tidak boleh mengandung emoji atau karakter non-ASCII.")
82
+ else:
83
+ with st.spinner("Mengambil tweet..."):
84
+ headers = {
85
+ "Authorization": f"Bearer {twitter_auth_token}",
86
+ "User-Agent": "StreamlitApp"
87
+ }
88
+ query = re.sub(r'[^\w\s]', '', keyword)
89
+ url = f"https://api.twitter.com/2/tweets/search/recent?query={query} lang:id&max_results={jumlah}&tweet.fields=created_at,text"
90
+
91
+ try:
92
+ response = requests.get(url, headers=headers)
93
+ if response.status_code == 200:
94
+ tweets = response.json().get("data", [])
95
+ if tweets:
96
+ df = pd.DataFrame(tweets)
97
+ df.rename(columns={"created_at": "date", "text": "content"}, inplace=True)
98
+ df = analisis_sentimen_indober(df)
99
+ tampilkan_hasil(df)
100
+ else:
101
+ st.warning("Tidak ada tweet ditemukan.")
102
+ elif response.status_code == 401:
103
+ st.error("Token tidak valid atau tidak memiliki izin.")
104
+ elif response.status_code == 429:
105
+ st.error("Terlalu banyak permintaan. Tunggu beberapa saat dan coba lagi.")
106
+ else:
107
+ st.error(f"Gagal mengambil data. Status: {response.status_code}")
108
+ except Exception as e:
109
+ st.error(f"Terjadi kesalahan: {e}")
110
 
111
+ # ---------------------- Mode Upload CSV ----------------------
112
+ elif mode == "Upload File CSV":
113
+ uploaded_file = st.file_uploader("Unggah file CSV dengan kolom 'content'", type="csv")
114
+ if uploaded_file:
115
+ try:
116
+ df = pd.read_csv(uploaded_file)
117
+ if 'content' not in df.columns:
118
+ st.error("Kolom 'content' tidak ditemukan.")
119
+ else:
120
+ df = analisis_sentimen_indober(df)
121
+ tampilkan_hasil(df)
122
+ except Exception as e:
123
+ st.error(f"Gagal membaca file: {e}")