Amritpal Singh committed on
Commit
e6db743
·
1 Parent(s): 5c6e526

Initial commit of Bayesian Simulator

Browse files
Files changed (3) hide show
  1. Dockerfile +14 -0
  2. app.py +55 -0
  3. requirements.txt +6 -0
Dockerfile ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.9-slim

WORKDIR /app

# build-essential is needed to compile any wheels with C extensions.
# --no-install-recommends plus clearing the apt lists keeps the image slim,
# and doing it all in one RUN avoids baking the apt cache into a layer.
RUN apt-get update \
    && apt-get install -y --no-install-recommends build-essential \
    && rm -rf /var/lib/apt/lists/*

# Copy only requirements first so this (slow) layer is cached unless
# requirements.txt itself changes. --no-cache-dir keeps pip's cache out
# of the image.
COPY requirements.txt .
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt

# Copy app files
COPY . .

# Expose default Streamlit port
EXPOSE 8501

# Run Streamlit app
CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
app.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Bayesian Token Co-occurrence Simulator (Streamlit app).

Builds a windowed token co-occurrence matrix from user-supplied sentences,
applies additive (Dirichlet) smoothing with a user-chosen prior alpha, renders
the smoothed counts as a heatmap, and samples a "next token" from the
normalized posterior row of a selected word.
"""
import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from nltk.tokenize import word_tokenize
import nltk

# quiet=True keeps the download progress out of the app logs on every
# Streamlit rerun. NOTE(review): recent NLTK releases also require the
# 'punkt_tab' resource for word_tokenize — confirm against the installed
# nltk version.
nltk.download('punkt', quiet=True)

st.title("📊 Bayesian Token Co-occurrence Simulator")

# User input
user_input = st.text_area("✍️ Enter your training sentences (one per line):",
"""
fido loves the red ball
timmy and fido go to the park
fido and timmy love to play
the red ball is timmy's favorite toy
""")

sentences = user_input.strip().split('\n')
tokenized = [word_tokenize(s.lower()) for s in sentences if s.strip()]
vocab = sorted(set(word for sentence in tokenized for word in sentence))

# Guard: with an empty text area there is no vocabulary — the 0x0 matrix
# would break the heatmap and the selectbox below. Stop cleanly instead.
if not vocab:
    st.warning("Please enter at least one sentence to build the model.")
    st.stop()

token2idx = {word: i for i, word in enumerate(vocab)}

# Co-occurrence matrix: count neighbors within +/- window_size positions.
window_size = 2
matrix = np.zeros((len(vocab), len(vocab)))

for sentence in tokenized:
    for i, word in enumerate(sentence):
        lo = max(0, i - window_size)
        hi = min(len(sentence), i + window_size + 1)
        for j in range(lo, hi):
            if i != j:
                # Tuple indexing instead of chained [a][b] lookups.
                matrix[token2idx[word], token2idx[sentence[j]]] += 1

alpha = st.slider("🔧 Set Bayesian Prior (α smoothing)", 0.0, 2.0, 0.1)
posterior = matrix + alpha

df = pd.DataFrame(posterior, index=vocab, columns=vocab)
st.subheader("📈 Co-occurrence Heatmap")
fig, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(df, annot=True, cmap="Blues", fmt=".1f", ax=ax)
st.pyplot(fig)
# Close the figure so repeated Streamlit reruns don't leak matplotlib state.
plt.close(fig)

# Next-token prediction
selected_word = st.selectbox("🔮 Predict next token after:", vocab)
row = posterior[token2idx[selected_word]]
total = row.sum()
if total == 0:
    # With alpha == 0 a word that never co-occurred (e.g. a one-word
    # sentence) has an all-zero row; dividing would yield NaNs and crash
    # np.random.choice. Fall back to a uniform distribution.
    probs = np.full(len(vocab), 1.0 / len(vocab))
else:
    probs = row / total
prediction = np.random.choice(vocab, p=probs)

st.markdown(f"**Predicted next token:** `{prediction}`")
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ streamlit
2
+ nltk
3
+ numpy
4
+ pandas
5
+ matplotlib
6
+ seaborn