import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from collections import defaultdict, Counter
import base64
import hashlib
from sklearn.manifold import MDS
import networkx as nx
from streamlit_float import *

st.set_page_config(layout="wide")

# Initialize the floating-element feature (streamlit_float)
float_init()

# Custom CSS hook; the stylesheet content is empty in this revision.
st.markdown("""
""", unsafe_allow_html=True)

# Transliteration alphabet: single-character glyphs plus two
# multi-character tokens that must be treated as single units.
ALLOWED_SINGLE_CHARS = set('4O892ERSZPBFVQWXYACIGH1TU0DNM3JKL567')
ALLOWED_MULTI_CHARS = ['(n)', '(v)']


def parse_voynich_word(word):
    """Parse a Voynich word into individual characters.

    The multi-character tokens '(n)' and '(v)' count as single units;
    characters outside the allowed alphabet are silently dropped.

    Returns:
        (filtered_word, chars): the word rebuilt from valid tokens and the
        list of tokens, or (None, None) when nothing valid remains.
    """
    if not word or word.strip() == '':
        return None, None

    word = word.strip()
    chars = []
    i = 0
    while i < len(word):
        # Multi-character tokens take priority over single glyphs.
        if i + 2 < len(word) and word[i:i + 3] in ALLOWED_MULTI_CHARS:
            chars.append(word[i:i + 3])
            i += 3
            continue
        if word[i] in ALLOWED_SINGLE_CHARS:
            chars.append(word[i])
        # BUG FIX: always advance, even past a disallowed character, so the
        # loop cannot stall on an unrecognized glyph.
        i += 1

    # If no valid characters remain, signal "no word".
    if not chars:
        return None, None
    return ''.join(chars), chars


@st.cache_data
def analyze_csv(df_hash):
    """Run the full corpus analysis; cached on the uploaded file's digest.

    Reads the DataFrame from st.session_state.df_data (the df_hash argument
    exists only as the cache key).

    Returns:
        words:            all parsed words, in reading order
        chars_list:       parallel list of per-word token lists
        char_positions:   char -> list of 0-based positions it occupied
        char_connections: char -> Counter of the chars that follow it
        word_positions:   [{'line': idx, 'words': [(word, col, chars), ...]}]
        line_word_map:    line idx -> Counter of words on that line
    """
    df = st.session_state.df_data
    words = []
    chars_list = []
    char_positions = defaultdict(list)
    char_connections = defaultdict(Counter)
    word_positions = []
    line_word_map = defaultdict(Counter)

    for line_idx, row in df.iterrows():
        line_words = []
        # Re-join the row and re-split on commas so stray separators inside
        # individual cells are handled uniformly.
        row_text = ','.join(str(val) for val in row if pd.notna(val))
        word_strings = row_text.split(',')

        for col_idx, word_str in enumerate(word_strings):
            if word_str.strip():
                word, chars = parse_voynich_word(word_str)
                if word and chars:
                    words.append(word)
                    chars_list.append(chars)
                    line_words.append((word, col_idx, chars))
                    line_word_map[line_idx][word] += 1
                    for j, char in enumerate(chars):
                        char_positions[char].append(j)
                    for j in range(len(chars) - 1):
                        char_connections[chars[j]][chars[j + 1]] += 1

        if line_words:
            word_positions.append({'line': line_idx, 'words': line_words})

    return words, chars_list, char_positions, char_connections, word_positions, line_word_map


@st.cache_data
def create_length_groups(words, chars_list):
    """Group (word, chars) pairs by token length (lengths up to 20); cached."""
    length_groups = defaultdict(list)
    for word, chars in zip(words, chars_list):
        if len(chars) <= 20:
            length_groups[len(chars)].append((word, chars))
    return length_groups


def create_12_slot_table(chars_list):
    """Build a per-character frequency table over the first 12 word positions.

    Returns a DataFrame with one row per character and alternating
    count / percentage columns for each of the 12 slots.
    """
    slot_frequencies = [Counter() for _ in range(12)]
    for chars in chars_list:
        for i, char in enumerate(chars[:12]):
            slot_frequencies[i][char] += 1

    # Per-slot totals for percentage computation.
    slot_totals = [sum(counter.values()) for counter in slot_frequencies]

    data = []
    all_chars = sorted(set(char for counter in slot_frequencies for char in counter))
    for char in all_chars:
        row = {'Character': char}
        for i in range(12):
            count = slot_frequencies[i][char]
            row[f'Slot_{i+1}'] = count
            if slot_totals[i] > 0:
                row[f'Slot_{i+1}_Pct'] = f"{(count / slot_totals[i] * 100):.2f}%"
            else:
                row[f'Slot_{i+1}_Pct'] = "0.00%"
        data.append(row)

    # Alternate count and percentage columns for readability.
    df = pd.DataFrame(data)
    ordered_cols = ['Character']
    for i in range(12):
        ordered_cols.append(f'Slot_{i+1}')
        ordered_cols.append(f'Slot_{i+1}_Pct')
    return df[ordered_cols]


def analyze_slot_structure(chars_list):
    """Summarize the 10 most common characters at every word position.

    Returns:
        (slot_summary, max_slots): slot index -> most_common(10) list, and
        the longest word length observed.
    """
    slot_contents = defaultdict(Counter)
    max_slots = 0
    for chars in chars_list:
        if len(chars) > max_slots:
            max_slots = len(chars)
        for i, char in enumerate(chars):
            slot_contents[i][char] += 1

    slot_summary = {}
    for slot in range(max_slots):
        if slot in slot_contents:
            slot_summary[slot] = slot_contents[slot].most_common(10)
    return slot_summary, max_slots


def create_line_word_scatter(line_word_map):
    """Project lines into 2-D with MDS over their word-frequency vectors."""
    all_words = set()
    for word_counter in line_word_map.values():
        all_words.update(word_counter.keys())
    # Sort for a deterministic column order in the frequency matrix
    # (set iteration order varies between runs).
    all_words = sorted(all_words)

    lines = sorted(line_word_map.keys())
    word_freq_matrix = np.zeros((len(lines), len(all_words)))
    for i, line in enumerate(lines):
        for j, word in enumerate(all_words):
            word_freq_matrix[i, j] = line_word_map[line][word]

    mds = MDS(n_components=2, random_state=42)
    line_coords = mds.fit_transform(word_freq_matrix)

    fig, ax = plt.subplots(figsize=(12, 8))
    ax.scatter(line_coords[:, 0], line_coords[:, 1])
    for i, line in enumerate(lines):
        ax.annotate(f"L{line}", (line_coords[i, 0], line_coords[i, 1]))
    ax.set_title('Line Similarity based on Word Usage')
    ax.set_xlabel('Dimension 1')
    ax.set_ylabel('Dimension 2')
    return fig


def get_download_link_csv(df, filename):
    """Return an HTML anchor that downloads *df* as *filename* (base64 CSV)."""
    csv = df.to_csv(index=False)
    b64 = base64.b64encode(csv.encode()).decode()
    # BUG FIX: the computed base64 payload was never embedded in a link, so
    # the rendered text was inert; rebuild the standard data-URI anchor.
    href = f'<a href="data:file/csv;base64,{b64}" download="{filename}">Download CSV</a>'
    return href


st.title("Voynich Manuscript Analyzer")
st.write("Upload your CSV file.")

# EVA legend image in the sidebar.
floating_image_file = st.sidebar.file_uploader(
    "Upload an image", type=['png', 'jpg', 'jpeg', 'gif'], key="floating_image")
if floating_image_file is not None:
    st.sidebar.image(floating_image_file, width=150, caption="Legend")

uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
if uploaded_file is None:
    # Nothing to analyze until a file is provided; halt the script run here.
    st.stop()

# Read the entire file as text first and normalize line endings.
uploaded_file.seek(0)
content = uploaded_file.read().decode('utf-8')
lines = content.replace('\r\n', '\n').replace('\r', '\n').strip().split('\n')
# Filter out empty lines - only keep lines with actual content.
lines = [line for line in lines if line.strip()]
data = [line.split(',') for line in lines]

# Create DataFrame from parsed data.
df = pd.DataFrame(data)

# Store in session state; key the cache on a stable digest of the content.
# BUG FIX: hash() on str is salted per process and therefore not a reliable
# cache key across runs; use a deterministic MD5 hex digest instead.
st.session_state.df_data = df
df_hash = hashlib.md5(content.encode()).hexdigest()

# Use cached analysis.
words, chars_list, char_positions, char_connections, word_positions, line_word_map = analyze_csv(df_hash)

# Pre-calculate length groups (cached).
length_groups = create_length_groups(words, chars_list)

st.subheader("Basic Statistics")
st.write(f"Total words: {len(words)}")
st.write(f"Total unique words: {len(set(words))}")
unique_chars = set()
for chars in chars_list:
    unique_chars.update(chars)
st.write(f"Total unique characters: {len(unique_chars)}")
st.write("Unique characters:", ", ".join(sorted(unique_chars)))

st.subheader("Sample Words (Character-by-Character)")
sample_df = pd.DataFrame([
    {'Word': word, 'Characters': ' | '.join(chars), 'Length': len(chars)}
    for word, chars in zip(words[:20], chars_list[:20])
])
st.dataframe(sample_df)

st.subheader("Character Bigram Analysis")
st.write("This reveals which character pairs occur most frequently - potential digraphs emerge from the data")
char_bigrams = Counter()
for chars in chars_list:
    for i in range(len(chars) - 1):
        char_bigrams[tuple(chars[i:i + 2])] += 1
total_char_bigrams = sum(char_bigrams.values())
char_bigram_df = pd.DataFrame([
    {'Bigram': ''.join(str(c) for c in bigram),
     'Char1': str(bigram[0]),
     'Char2': str(bigram[1]),
     'Count': int(count),
     'Percentage': f"{(count / total_char_bigrams * 100):.2f}%"}
    for bigram, count in char_bigrams.most_common(30)
])
st.dataframe(char_bigram_df)
st.markdown(get_download_link_csv(char_bigram_df, "char_bigrams.csv"), unsafe_allow_html=True)

st.subheader("Character Trigram Analysis")
st.write("Three-character sequences - looking for common patterns")
char_trigrams = Counter()
for chars in chars_list:
    for i in range(len(chars) - 2):
        char_trigrams[tuple(chars[i:i + 3])] += 1
total_char_trigrams = sum(char_trigrams.values())
char_trigram_df = pd.DataFrame([
    {'Trigram': ''.join(str(c) for c in trigram),
     'Count': int(count),
     'Percentage': f"{(count / total_char_trigrams * 100):.2f}%"}
    for trigram, count in char_trigrams.most_common(30)
])
st.dataframe(char_trigram_df)
st.markdown(get_download_link_csv(char_trigram_df, "char_trigrams.csv"), unsafe_allow_html=True)
# ---- Word-level n-gram analysis (within-line sequences only) ----
st.subheader("Word Bigram Analysis")
st.write("Consecutive word pairs within each line")
word_bigrams = Counter()
# Pairs are counted only between neighbours on the same line.
for line_data in word_positions:
    sequence = [w for w, _, _ in line_data['words']]
    for left, right in zip(sequence, sequence[1:]):
        word_bigrams[(left, right)] += 1
total_word_bigrams = sum(word_bigrams.values())
if total_word_bigrams > 0:
    bigram_rows = []
    for bigram, count in word_bigrams.most_common(20):
        bigram_rows.append({
            'Word1': str(bigram[0]),
            'Word2': str(bigram[1]),
            'Count': int(count),
            'Percentage': f"{(count / total_word_bigrams * 100):.2f}%",
        })
    word_bigram_df = pd.DataFrame(bigram_rows)
    st.dataframe(word_bigram_df)
    st.markdown(get_download_link_csv(word_bigram_df, "word_bigrams.csv"), unsafe_allow_html=True)
else:
    st.write("No word bigrams found (lines contain only single words)")

st.subheader("Word Trigram Analysis")
st.write("Consecutive word triples within each line")
word_trigrams = Counter()
# Triples are counted only between neighbours on the same line.
for line_data in word_positions:
    sequence = [w for w, _, _ in line_data['words']]
    for first, second, third in zip(sequence, sequence[1:], sequence[2:]):
        word_trigrams[(first, second, third)] += 1
total_word_trigrams = sum(word_trigrams.values())
if total_word_trigrams > 0:
    trigram_rows = []
    for trigram, count in word_trigrams.most_common(20):
        trigram_rows.append({
            'Word1': str(trigram[0]),
            'Word2': str(trigram[1]),
            'Word3': str(trigram[2]),
            'Count': int(count),
            'Percentage': f"{(count / total_word_trigrams * 100):.2f}%",
        })
    word_trigram_df = pd.DataFrame(trigram_rows)
    st.dataframe(word_trigram_df)
    st.markdown(get_download_link_csv(word_trigram_df, "word_trigrams.csv"), unsafe_allow_html=True)
else:
    st.write("No word trigrams found (lines contain fewer than 3 consecutive words)")

# ---- Positional character frequencies over the first 12 slots ----
st.subheader("Character Frequency by Position")
slot_freq_df = create_12_slot_table(chars_list)
st.dataframe(slot_freq_df)
st.markdown(get_download_link_csv(slot_freq_df, "slot_frequencies.csv"), unsafe_allow_html=True)
slot_summary, max_slots = analyze_slot_structure(chars_list) st.subheader("Words by Length Analysis") selected_length = st.selectbox("Select word length to analyze:", sorted(length_groups.keys()), key="length_selector") if selected_length: words_of_length = length_groups[selected_length] position_chars = [Counter() for _ in range(selected_length)] for _, chars in words_of_length: for i, char in enumerate(chars): position_chars[i][char] += 1 # Calculate totals for each position position_totals = [sum(counter.values()) for counter in position_chars] st.write(f"Found {len(words_of_length)} words of length {selected_length}") freq_data = [] for char in sorted(unique_chars): row = {'Character': char} for pos in range(selected_length): count = position_chars[pos][char] row[f'Pos_{pos+1}'] = count if position_totals[pos] > 0: row[f'Pos_{pos+1}_Pct'] = f"{(count / position_totals[pos] * 100):.2f}%" else: row[f'Pos_{pos+1}_Pct'] = "0.00%" freq_data.append(row) freq_df = pd.DataFrame(freq_data) # Reorder columns to alternate count and percentage ordered_cols = ['Character'] for pos in range(selected_length): ordered_cols.append(f'Pos_{pos+1}') ordered_cols.append(f'Pos_{pos+1}_Pct') freq_df = freq_df[ordered_cols] st.dataframe(freq_df) st.markdown(get_download_link_csv(freq_df, f"length_{selected_length}_analysis.csv"), unsafe_allow_html=True) st.write("Sample words of this length:") sample_df = pd.DataFrame([ {'Word': word, 'Characters': ' | '.join(chars)} for word, chars in words_of_length[:30] ]) st.dataframe(sample_df) st.subheader("Word Distribution Across Lines") line_scatter = create_line_word_scatter(line_word_map) st.pyplot(line_scatter) st.subheader("Character Context Analysis") st.write("Select a character to see what comes before and after it") unique_chars_sorted = sorted(set(char for chars in chars_list for char in chars)) selected_char = st.selectbox("Select a character to analyze:", unique_chars_sorted, key="char_selector") if selected_char: before_counter = 
Counter() after_counter = Counter() for chars in chars_list: for i, char in enumerate(chars): if char == selected_char: if i > 0: before_counter[chars[i-1]] += 1 if i < len(chars) - 1: after_counter[chars[i+1]] += 1 col1, col2 = st.columns(2) with col1: st.write(f"Characters that commonly PRECEDE '{selected_char}':") total_before = sum(before_counter.values()) before_data = [ {'Character': char, 'Count': count, 'Percentage': f"{(count / total_before * 100):.2f}%"} for char, count in before_counter.most_common(15) ] before_df = pd.DataFrame(before_data) st.dataframe(before_df) fig1, ax1 = plt.subplots(figsize=(8, 6)) plt.bar(before_df['Character'], before_df['Count']) plt.title(f"Characters before '{selected_char}'") plt.xticks(rotation=45) st.pyplot(fig1) with col2: st.write(f"Characters that commonly FOLLOW '{selected_char}':") total_after = sum(after_counter.values()) after_data = [ {'Character': char, 'Count': count, 'Percentage': f"{(count / total_after * 100):.2f}%"} for char, count in after_counter.most_common(15) ] after_df = pd.DataFrame(after_data) st.dataframe(after_df) fig2, ax2 = plt.subplots(figsize=(8, 6)) plt.bar(after_df['Character'], after_df['Count']) plt.title(f"Characters after '{selected_char}'") plt.xticks(rotation=45) st.pyplot(fig2) st.subheader("Line Viewer") available_lines = sorted(set(line_data['line'] for line_data in word_positions)) selected_line = st.selectbox("Select Line:", [''] + [f"Line {line}" for line in available_lines], key="line_selector") if selected_line: line_num = int(selected_line.replace('Line ', '')) line_words = next((line_data['words'] for line_data in word_positions if line_data['line'] == line_num), []) for word, _, chars in line_words: st.write(f"**Word: {word}** ({len(chars)} characters)") cols = st.columns(min(20, max(12, len(chars)))) for i in range(len(chars)): with cols[i]: char = chars[i] st.markdown(f"""