Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,731 +1,11 @@
|
|
| 1 |
-
import streamlit as st
|
| 2 |
-
import numpy as np
|
| 3 |
-
import pandas as pd
|
| 4 |
-
|
| 5 |
-
# The 31 mutation sites used by the 31-bit encoding (site 3614 excluded),
# in ascending genomic order.
mutation_site_headers_actual = [
    3244, 3297, 3350, 3399, 3455, 3509, 3562,
    3665, 3720, 3773, 3824, 3879, 3933, 3985, 4039,
    4089, 4145, 4190, 4245, 4298, 4349, 4402, 4455,
    4510, 4561, 4615, 4668, 4720, 4773, 4828, 4882,
]

# Per-site editing-frequency thresholds (site 3614 excluded here as well).
thresholds_actual = pd.Series({
    3244: 1.096910677, 3297: 0.923658795, 3350: 0.668939037, 3399: 0.914305214,
    3455: 1.297392984, 3509: 1.812636208, 3562: 1.185047484,
    3665: 0.298007308, 3720: 0.58857544, 3773: 0.882561082, 3824: 1.149082617,
    3879: 0.816050702, 3933: 2.936517653, 3985: 1.597166791, 4039: 0.962108082,
    4089: 1.479783497, 4145: 0.305853225, 4190: 1.311869541, 4245: 1.707556905,
    4298: 0.875013076, 4349: 1.227704526, 4402: 0.593206446, 4455: 1.179633137,
    4510: 1.272477799, 4561: 1.293841573, 4615: 1.16821885, 4668: 1.40306,
    4720: 0.706530878, 4773: 1.483114072, 4828: 0.954939873, 4882: 1.47524328,
})

# The same 31 sites in the custom plate order used downstream:
# positions 1-23 run 4402 down to 3244, positions 24-31 run 4882 down to 4455.
mutation_site_headers = [
    4402, 4349, 4298, 4245, 4190, 4145, 4089, 4039,
    3985, 3933, 3879, 3824, 3773, 3720, 3665,
    3562, 3509, 3455, 3399, 3350, 3297, 3244,   # positions 1-23
    4882, 4828, 4773, 4720, 4668, 4615, 4561, 4510, 4455,   # positions 24-31
]

# Thresholds aligned to the custom plate order above.
thresholds = thresholds_actual.reindex(mutation_site_headers)
|
| 34 |
-
|
| 35 |
-
# === Utility functions ===
|
| 36 |
-
|
| 37 |
-
# Voyager ASCII 6-bit conversion table: code 0 is space, 1-26 are A-Z,
# 27-36 are the digits, 37-55 are punctuation (56 codes fit in 6 bits).
_VOYAGER_CHARS = " ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.,()+-*/=$!:%\"#@'?&"

voyager_table = dict(enumerate(_VOYAGER_CHARS))

# Inverse mapping: character -> 6-bit code.
reverse_voyager_table = {ch: code for code, ch in voyager_table.items()}
|
| 49 |
-
|
| 50 |
-
def string_to_binary_labels(s: str) -> list[int]:
    """Encode *s* as a flat bit list, 6 bits per character, MSB first.

    Each character is upper-cased and looked up in the Voyager 6-bit
    table; characters not in the table encode as code 0 (space).
    """
    out: list[int] = []
    for ch in s:
        code = reverse_voyager_table.get(ch.upper(), 0)
        out += [(code >> shift) & 1 for shift in (5, 4, 3, 2, 1, 0)]
    return out
|
| 57 |
-
|
| 58 |
-
def binary_labels_to_string(bits: list[int]) -> str:
    """Decode a flat bit list (6 bits per character, MSB first) to text.

    A trailing partial group is zero-padded on the right; any code not in
    the Voyager table decodes to '?'.
    """
    decoded = []
    for start in range(0, len(bits), 6):
        group = bits[start:start + 6]
        group = group + [0] * (6 - len(group))
        code = sum(bit << (5 - pos) for pos, bit in enumerate(group))
        decoded.append(voyager_table.get(code, '?'))
    return ''.join(decoded)
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
# === Streamlit App ===

st.title("ASCII & Binary Label Converter")

# One tab per workflow; labels kept exactly as shown to users.
_TAB_LABELS = [
    "Text to Binary Labels (31)",
    "EF → Binary → String (31)",
    "Text to Binary Labels (32)",
    "EF → Binary (32)",
    "Binary → String",
    "Robot Script Generator",
]
tab1, tab2, tab3, tab4, tab5, tab6 = st.tabs(_TAB_LABELS)
|
| 73 |
-
|
| 74 |
-
# Tab 1: Text to Binary
with tab1:
    user_input = st.text_input("Enter text", value="DNA", key="input_text_31")
    if user_input:
        # 6-bit Voyager code per character (kept for debugging; not displayed).
        ascii_codes = [reverse_voyager_table.get(c.upper(), 0) for c in user_input]
        binary_labels = string_to_binary_labels(user_input)

        st.subheader("Binary Labels per Character")
        per_char = [binary_labels[j:j + 6] for j in range(0, len(binary_labels), 6)]
        for char_idx, bits in enumerate(per_char):
            st.write(f"'{user_input[char_idx]}' → {bits}")

        # Pack the bit stream into zero-padded 31-bit rows; the last column
        # counts the edited (1) sites in each row.
        st.subheader("Binary Labels (31-bit groups)")
        groups = []
        for j in range(0, len(binary_labels), 31):
            row = binary_labels[j:j + 31]
            row = row + [0] * (31 - len(row))
            groups.append(row + [sum(row)])

        df = pd.DataFrame(
            groups,
            columns=[str(h) for h in mutation_site_headers] + ["Edited Sites"],
        )
        st.dataframe(df)
        st.download_button("Download as CSV", df.to_csv(index=False), "text_31_binary_labels.csv", key="download_csv_tab1_31csv")

        # Same labels with site columns in ascending genomic order, plus a
        # placeholder all-zero "3614" column spliced in right after "3562".
        ascending_headers = sorted(mutation_site_headers_actual)
        df_sorted = df[[str(h) for h in ascending_headers if str(h) in df.columns]].copy()
        if "3614" not in df_sorted.columns:
            df_sorted.insert(df_sorted.columns.get_loc("3562") + 1, "3614", 0)

        st.subheader("Binary Labels (Ascending Order 3244 → 4882)")
        st.dataframe(df_sorted)
        st.download_button("Download Ascending Order CSV", df_sorted.to_csv(index=False), "text_binary_labels_ascending.csv", key="download_csv_tab1_ascend")

        # === Robot Preparation Script Generation ===
        st.subheader("Robot Preparation Script")
        robot_template = pd.read_csv("/home/user/app/Robot.csv", skiprows=3)
        robot_template.columns = ['Labware', 'Source', 'Labware_2', 'Destination', 'Volume', 'Tool', 'Name']

        # Sample numbers address destination wells below.
        df_sorted.insert(0, 'Sample', range(1, len(df_sorted) + 1))

        # Edited-site count per sample, then 32 µl split evenly over donors.
        # NOTE(review): a row with zero edited sites divides by zero here
        # (inf volume) — confirm inputs always contain at least one edit.
        df_sorted['# donors'] = df_sorted.iloc[:, 1:].sum(axis=1)
        df_sorted['volume donors (µl)'] = 32 / df_sorted['# donors']

        robot_script = []
        # Pad the template's source wells with placeholders up to 32 so each
        # of the 32 site columns has a source.
        source_wells = robot_template['Source'].unique().tolist()
        if len(source_wells) < 32:
            source_wells += [f"Fake{i}" for i in range(32 - len(source_wells))]
        source_wells = source_wells[:32]

        st.write(f"Number of source wells: {len(source_wells)}")
        st.write(f"Number of binary columns: {len(df_sorted.columns[1:33])}")

        # One transfer per (site column, sample) pair flagged as edited.
        for i, col in enumerate(df_sorted.columns[1:33]):
            for row_idx, sample in df_sorted.iterrows():
                if sample[col] == 1:
                    robot_script.append({
                        'Source': source_wells[i],
                        'Destination': f"A{sample['Sample']}",
                        'Volume': round(sample['volume donors (µl)'], 2),
                    })

        robot_script_df = pd.DataFrame(robot_script)
        st.dataframe(robot_script_df)
        st.download_button("Download Robot Script CSV", robot_script_df.to_csv(index=False), "robot_script.csv", key="download_csv_tab1_robot")

        # === Robot Preparation Script (Custom Order: 4402 → 3244, 4882 → 4455) ===
        st.subheader("Robot Preparation Script (Custom Order: 4402 → 3244, 4882 → 4455)")

        # Custom plate order, this time including the 3614 placeholder site.
        custom_headers = [
            4402, 4349, 4298, 4245, 4190, 4145, 4089, 4039,
            3985, 3933, 3879, 3824, 3773, 3720, 3665, 3614,
            3562, 3509, 3455, 3399, 3350, 3297, 3244,
            4882, 4828, 4773, 4720, 4668, 4615, 4561, 4510, 4455,
        ]

        df_sorted_custom = df[[str(h) for h in custom_headers if str(h) in df.columns]].copy()

        # Splice in an all-zero "3614" column at its custom-order position.
        if "3614" not in df_sorted_custom.columns:
            df_sorted_custom.insert(custom_headers.index(3614), "3614", 0)

        if "Sample" not in df_sorted_custom.columns:
            df_sorted_custom.insert(0, 'Sample', range(1, len(df_sorted_custom) + 1))

        # Donor count and even split of 32 µl, as above.
        df_sorted_custom['# donors'] = df_sorted_custom.iloc[:, 1:].sum(axis=1)
        df_sorted_custom['volume donors (µl)'] = 32 / df_sorted_custom['# donors']

        robot_script_custom = []
        for i, col in enumerate(df_sorted_custom.columns[1:33]):  # 32 site columns after Sample
            for row_idx, sample in df_sorted_custom.iterrows():
                if sample[col] == 1:
                    robot_script_custom.append({
                        'Source': source_wells[i],
                        'Destination': f"A{sample['Sample']}",
                        'Volume': round(sample['volume donors (µl)'], 2),
                    })

        robot_script_custom_df = pd.DataFrame(robot_script_custom)
        st.dataframe(robot_script_custom_df)
        st.download_button("Download Custom Order Robot Script CSV", robot_script_custom_df.to_csv(index=False), "robot_script_custom_order.csv", key="download_csv_tab1_robot_custom")
|
| 189 |
-
|
| 190 |
-
# Tab 2: EF → Binary
with tab2:
    st.write("Upload an Editing Frequency CSV or enter manually:")
    st.write("**Note:** Please upload CSV files **without column headers**, in ascending order from 3244 to 4882.")
    ef_file = st.file_uploader("Upload EF CSV", type=["csv"], key="ef")

    ascending_sites = sorted(mutation_site_headers_actual)
    if ef_file:
        ef_df = pd.read_csv(ef_file, header=None)
        ef_df.columns = [str(site) for site in ascending_sites]
    else:
        ef_df = pd.DataFrame(columns=[str(site) for site in ascending_sites])

    edited_df = st.data_editor(ef_df, num_rows="dynamic")

    if st.button("Convert to Binary Labels", key="convert_button_tab2"):
        # Threshold each site's editing frequency into a 0/1 call.
        binary_part = pd.DataFrame()
        for site in ascending_sites:
            site_col = str(site)
            binary_part[site_col] = (
                edited_df[site_col].astype(float) >= thresholds_actual[site]
            ).astype(int)

        # Reorder the calls into the custom plate order.
        binary_reordered = binary_part[[str(h) for h in mutation_site_headers if str(h) in binary_part.columns]]

        def color_binary(val):
            # Green = edited (1), red = unedited (0), blank otherwise.
            if val == 1:
                return "background-color: lightgreen"
            if val == 0:
                return "background-color: lightcoral"
            return ""

        st.subheader("Binary Labels (Reordered 4402→3244, 4882→4455)")
        st.dataframe(binary_reordered.style.applymap(color_binary))
        st.download_button("Download CSV", binary_reordered.to_csv(index=False), "ef_binary_labels.csv", key="download_csv_tab2_csv")

        all_bits = binary_reordered.values.flatten().tolist()
        st.subheader("Decoded String (continuous across rows)")
        st.write(binary_labels_to_string(all_bits))

        st.subheader("Binary Labels (Ascending 3244→4882)")
        st.dataframe(binary_part.style.applymap(color_binary))
        st.download_button("Download Ascending Order CSV", binary_part.to_csv(index=False), "ef_binary_labels_ascending.csv", key="download_csv_tab2_ascend")

        all_bits = binary_part.values.flatten().tolist()
        st.subheader("Decoded String (continuous across rows)")
        st.write(binary_labels_to_string(all_bits))
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
# All 32 mutation sites (including 3614), ascending genomic order.
mutation_site_headers_actual_3614 = [
    3244, 3297, 3350, 3399, 3455, 3509, 3562, 3614,
    3665, 3720, 3773, 3824, 3879, 3933, 3985, 4039,
    4089, 4145, 4190, 4245, 4298, 4349, 4402, 4455,
    4510, 4561, 4615, 4668, 4720, 4773, 4828, 4882,
]

# Per-site editing-frequency thresholds, including site 3614.
thresholds_actual_3614 = pd.Series({
    3244: 1.096910677, 3297: 0.923658795, 3350: 0.668939037, 3399: 0.914305214,
    3455: 1.297392984, 3509: 1.812636208, 3562: 1.185047484, 3614: 0.157969131375,
    3665: 0.298007308, 3720: 0.58857544, 3773: 0.882561082, 3824: 1.149082617,
    3879: 0.816050702, 3933: 2.936517653, 3985: 1.597166791, 4039: 0.962108082,
    4089: 1.479783497, 4145: 0.305853225, 4190: 1.311869541, 4245: 1.707556905,
    4298: 0.875013076, 4349: 1.227704526, 4402: 0.593206446, 4455: 1.179633137,
    4510: 1.272477799, 4561: 1.293841573, 4615: 1.16821885, 4668: 1.40306,
    4720: 0.706530878, 4773: 1.483114072, 4828: 0.954939873, 4882: 1.47524328,
})

# The same 32 sites in the custom plate order (4402 → 3244, 4882 → 4455).
mutation_site_headers_3614 = [
    4402, 4349, 4298, 4245, 4190, 4145, 4089, 4039,
    3985, 3933, 3879, 3824, 3773, 3720, 3665, 3614,
    3562, 3509, 3455, 3399, 3350, 3297, 3244,   # positions 1-23
    4882, 4828, 4773, 4720, 4668, 4615, 4561, 4510, 4455,   # positions 24-32
]

# Thresholds aligned to the custom plate order above.
thresholds_3614 = thresholds_actual_3614.reindex(mutation_site_headers_3614)
|
| 267 |
-
|
| 268 |
-
# === Utility functions ===

# NOTE(review): this rebuilds reverse_voyager_table with the same value it
# already has from its earlier definition in this file — confirm before removing.
reverse_voyager_table = {value: code for code, value in voyager_table.items()}
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
# Tab 3: Text to Binary (32)
with tab3:
    user_input_32 = st.text_input("Enter text", value="DNA", key="input_text_32")
    if user_input_32:
        # Plain ASCII codes shown to the user (encoding itself uses Voyager codes).
        ascii_codes = [ord(c) for c in user_input_32]
        binary_labels = string_to_binary_labels(user_input_32)

        st.subheader("ASCII Codes")
        st.write(ascii_codes)

        st.subheader("Binary Labels per Character")
        grouped = [binary_labels[i:i+6] for i in range(0, len(binary_labels), 6)]
        for i, bits in enumerate(grouped):
            st.write(f"'{user_input_32[i]}' → {bits}")

        # Pack the bit stream into zero-padded 32-bit rows; the last column
        # counts the edited (1) sites in each row.
        st.subheader("Binary Labels (32-bit groups)")
        groups = []
        for i in range(0, len(binary_labels), 32):
            group = binary_labels[i:i+32]
            group += [0] * (32 - len(group))
            groups.append(group + [sum(group)])

        df = pd.DataFrame(groups, columns=[str(h) for h in mutation_site_headers_3614] + ["Edited Sites"])
        st.dataframe(df)
        st.download_button("Download as CSV", df.to_csv(index=False), "text_32_binary_labels.csv", key="download_csv_tab3_csv")

        ascending_headers = sorted(mutation_site_headers_actual_3614)
        # FIX: take an explicit copy before mutating. The original inserted
        # columns into a slice of `df` (chained-assignment hazard /
        # SettingWithCopyWarning); tab1 already uses .copy() for this step.
        df_sorted = df[[str(h) for h in ascending_headers if str(h) in df.columns]].copy()
        st.subheader("Binary Labels (Ascending Order 3244 → 4882)")
        st.dataframe(df_sorted)
        st.download_button("Download Ascending Order CSV", df_sorted.to_csv(index=False), "text_binary_labels_ascending.csv", key="download_csv_tab3_ascend")

        # === Robot Preparation Script Generation ===
        st.subheader("Robot Preparation Script")
        robot_template = pd.read_csv("/home/user/app/Robot.csv", skiprows=3)
        robot_template.columns = ['Labware', 'Source', 'Labware_2', 'Destination', 'Volume', 'Tool', 'Name']

        # Sample numbers address destination wells below.
        df_sorted.insert(0, 'Sample', range(1, len(df_sorted)+1))

        # Step 1: edited-site count per row.
        df_sorted['# donors'] = df_sorted.iloc[:, 1:].sum(axis=1)

        # Step 2: 32 µl split evenly across donors.
        # NOTE(review): rows with zero edits divide by zero (inf volume).
        df_sorted['volume donors (µl)'] = 32 / df_sorted['# donors']

        # Step 3: generate the robot script.
        robot_script = []
        source_wells = robot_template['Source'].unique().tolist()
        # FIX (consistency with tab1): pad to 32 source wells so a template
        # with fewer distinct sources cannot raise IndexError below.
        if len(source_wells) < 32:
            source_wells += [f"Fake{i}" for i in range(32 - len(source_wells))]
        source_wells = source_wells[:32]

        for i, col in enumerate(df_sorted.columns[1:33]):
            for row_idx, sample in df_sorted.iterrows():
                if sample[col] == 1:
                    source = source_wells[i]
                    dest = f"A{sample['Sample']}"
                    vol = round(sample['volume donors (µl)'], 2)
                    robot_script.append({'Source': source, 'Destination': dest, 'Volume': vol})

        robot_script_df = pd.DataFrame(robot_script)
        st.dataframe(robot_script_df)
        st.download_button("Download Robot Script CSV", robot_script_df.to_csv(index=False), "robot_script.csv", key="download_csv_tab3_robot")
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
# Tab 4: EF → Binary (32)
with tab4:
    st.write("Upload an Editing Frequency CSV or enter manually:")
    st.write("**Note:** Please upload CSV files **without column headers**, in ascending order from 3244 to 4882.")
    ef_file_2 = st.file_uploader("Upload EF CSV", type=["csv"], key="ef2")

    ascending_sites_32 = sorted(mutation_site_headers_actual_3614)
    if ef_file_2:
        ef_df = pd.read_csv(ef_file_2, header=None)
        ef_df.columns = [str(site) for site in ascending_sites_32]
    else:
        ef_df = pd.DataFrame(columns=[str(site) for site in ascending_sites_32])

    edited_df = st.data_editor(ef_df, num_rows="dynamic")

    if st.button("Convert to Binary Labels", key="convert_button_tab4"):
        # Threshold each site's editing frequency into a 0/1 call.
        binary_part = pd.DataFrame()
        for site in ascending_sites_32:
            site_col = str(site)
            binary_part[site_col] = (
                edited_df[site_col].astype(float) >= thresholds_actual_3614[site]
            ).astype(int)

        # Reorder the calls into the custom plate order (with 3614).
        binary_reordered = binary_part[[str(h) for h in mutation_site_headers_3614 if str(h) in binary_part.columns]]

        def color_binary(val):
            # Green = edited (1), red = unedited (0), blank otherwise.
            if val == 1:
                return "background-color: lightgreen"
            if val == 0:
                return "background-color: lightcoral"
            return ""

        st.subheader("Binary Labels (Reordered 4402→3244, 4882→4455)")
        st.dataframe(binary_reordered.style.applymap(color_binary))
        st.download_button("Download CSV", binary_reordered.to_csv(index=False), "ef_binary_labels.csv", key="download_csv_tab4_csv")

        all_bits = binary_reordered.values.flatten().tolist()
        st.subheader("Decoded String (continuous across rows)")
        st.write(binary_labels_to_string(all_bits))

        st.subheader("Binary Labels (Ascending 3244→4882)")
        st.dataframe(binary_part.style.applymap(color_binary))
        st.download_button("Download Ascending Order CSV", binary_part.to_csv(index=False), "ef_binary_labels_ascending.csv", key="download_csv_tab4_ascend")

        all_bits = binary_part.values.flatten().tolist()
        st.subheader("Decoded String (continuous across rows)")
        st.write(binary_labels_to_string(all_bits))
|
| 382 |
-
|
| 383 |
-
def get_well_position(sample_index):
    """Map a 1-based sample index to a plate well label.

    Wells fill row-major across 12 columns: 1 → A1, 12 → A12, 13 → B1, ...
    """
    zero_based = sample_index - 1
    row_letter = chr(ord('A') + zero_based // 12)
    col_number = zero_based % 12 + 1
    return f"{row_letter}{col_number}"
|
| 390 |
-
|
| 391 |
-
# # Tab 5: Binary → String
|
| 392 |
-
# with tab5:
|
| 393 |
-
# st.header("Decode Binary Labels to String")
|
| 394 |
-
|
| 395 |
-
# # Utility: Track source volumes and update if exceeds limit
|
| 396 |
-
# def track_and_replace_source(source_list, robot_script, volume_limit=180):
|
| 397 |
-
# source_volumes = {}
|
| 398 |
-
# adjusted_sources = []
|
| 399 |
-
|
| 400 |
-
# for entry in robot_script:
|
| 401 |
-
# src = entry['Source']
|
| 402 |
-
# vol = entry['Volume']
|
| 403 |
-
|
| 404 |
-
# if src not in source_volumes:
|
| 405 |
-
# source_volumes[src] = 0
|
| 406 |
-
|
| 407 |
-
# source_volumes[src] += vol
|
| 408 |
-
|
| 409 |
-
# if source_volumes[src] > volume_limit:
|
| 410 |
-
# row_letter = src[0]
|
| 411 |
-
# col_number = src[1:]
|
| 412 |
-
# new_row_letter = chr(ord(row_letter) + 4)
|
| 413 |
-
# new_src = f"{new_row_letter}{col_number}"
|
| 414 |
-
# entry['Source'] = new_src
|
| 415 |
-
|
| 416 |
-
# if new_src not in source_volumes:
|
| 417 |
-
# source_volumes[new_src] = 0
|
| 418 |
-
# source_volumes[new_src] += vol
|
| 419 |
-
# source_volumes[src] -= vol
|
| 420 |
-
|
| 421 |
-
# adjusted_sources.append(entry)
|
| 422 |
-
|
| 423 |
-
# return adjusted_sources, source_volumes
|
| 424 |
-
|
| 425 |
-
# # Utility: Generate fixed-volume D source to all sample wells
|
| 426 |
-
# def generate_fixed_d_source_instructions_to_all_samples(n_samples, fixed_volume=16, volume_limit=170):
|
| 427 |
-
# d_source_volumes = {}
|
| 428 |
-
# d_source_script = []
|
| 429 |
-
# current_d_index = 1
|
| 430 |
-
|
| 431 |
-
# for i in range(n_samples):
|
| 432 |
-
# dest = get_well_position(i + 1)
|
| 433 |
-
# current_d_well = f"D{current_d_index}"
|
| 434 |
-
|
| 435 |
-
# if current_d_well not in d_source_volumes:
|
| 436 |
-
# d_source_volumes[current_d_well] = 0
|
| 437 |
-
|
| 438 |
-
# if d_source_volumes[current_d_well] + fixed_volume > volume_limit:
|
| 439 |
-
# current_d_index += 1
|
| 440 |
-
# current_d_well = f"D{current_d_index}"
|
| 441 |
-
# d_source_volumes[current_d_well] = 0
|
| 442 |
-
|
| 443 |
-
# d_source_volumes[current_d_well] += fixed_volume
|
| 444 |
-
# tool = 'TS_10' if fixed_volume < 10 else 'TS_50'
|
| 445 |
-
|
| 446 |
-
# d_source_script.append({
|
| 447 |
-
# 'Source': current_d_well,
|
| 448 |
-
# 'Destination': dest,
|
| 449 |
-
# 'Volume': fixed_volume,
|
| 450 |
-
# 'Tool': tool
|
| 451 |
-
# })
|
| 452 |
-
|
| 453 |
-
# return d_source_script, d_source_volumes
|
| 454 |
-
|
| 455 |
-
# def generate_source_wells(n):
|
| 456 |
-
# wells = []
|
| 457 |
-
# rows = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
|
| 458 |
-
# for i in range(n):
|
| 459 |
-
# row = rows[i // 12] # cycle through A, B, C...
|
| 460 |
-
# col = (i % 12) + 1 # 1 to 12
|
| 461 |
-
# wells.append(f"{row}{col}")
|
| 462 |
-
# return wells
|
| 463 |
-
|
| 464 |
-
# st.subheader("Binary per Row")
|
| 465 |
-
# st.write("Upload CSV with any number of columns (0 or 1), no headers, from EF Binary format or enter manually below.")
|
| 466 |
-
|
| 467 |
-
# binary32_file = st.file_uploader("Upload Binary CSV", type=["csv"], key="binary_any")
|
| 468 |
-
|
| 469 |
-
# st.subheader("Optional Metadata (Optional)")
|
| 470 |
-
# barcode_id_input = st.text_input("Barcode ID (applied to all rows, optional)", value="")
|
| 471 |
-
# labware_source_input = st.text_input("Labware for Source (optional, default = 1)", value="1")
|
| 472 |
-
# labware_dest_input = st.text_input("Labware for Destination (optional, default = 1)", value="1")
|
| 473 |
-
# name_input = st.text_input("Name field (optional, default = blank)", value="")
|
| 474 |
-
|
| 475 |
-
# if binary32_file:
|
| 476 |
-
# df_32 = pd.read_csv(binary32_file, header=None)
|
| 477 |
-
# df_32.columns = [str(h) for h in range(1, len(df_32.columns)+1)]
|
| 478 |
-
# else:
|
| 479 |
-
# df_32 = st.data_editor(
|
| 480 |
-
# pd.DataFrame(columns=[str(h) for h in range(1, 33)]),
|
| 481 |
-
# num_rows="dynamic",
|
| 482 |
-
# key="manual_any_input"
|
| 483 |
-
# )
|
| 484 |
-
|
| 485 |
-
# if not df_32.empty:
|
| 486 |
-
# st.subheader("Binary Labels (Uploaded)")
|
| 487 |
-
# st.dataframe(df_32.style.applymap(lambda v: "background-color: lightgreen" if v == 1 else "background-color: lightcoral"))
|
| 488 |
-
# st.download_button("Download CSV", df_32.to_csv(index=False), "decoded_binary_uploaded.csv", key="download_csv_uploaded")
|
| 489 |
-
|
| 490 |
-
# decoded = binary_labels_to_string(df_32.values.flatten().astype(int).tolist())
|
| 491 |
-
# st.subheader("Decoded String")
|
| 492 |
-
# st.write(decoded)
|
| 493 |
-
# st.download_button("Download Concatenated Output", decoded, "decoded_binary_string.txt", key="download_txt_any")
|
| 494 |
-
|
| 495 |
-
# st.subheader("Robot Preparation Script from Binary")
|
| 496 |
-
|
| 497 |
-
# df_32_robot = df_32.copy()
|
| 498 |
-
# df_32_robot.insert(0, 'Sample', range(1, len(df_32_robot)+1))
|
| 499 |
-
# df_32_robot['# donors'] = df_32_robot.iloc[:, 1:].astype(int).sum(axis=1)
|
| 500 |
-
# df_32_robot['volume donors (µl)'] = 64 / df_32_robot['# donors']
|
| 501 |
-
|
| 502 |
-
# robot_script_32 = []
|
| 503 |
-
# source_wells_32 = generate_source_wells(df_32.shape[1])
|
| 504 |
-
|
| 505 |
-
# for i, col in enumerate(df_32.columns):
|
| 506 |
-
# for row_idx, sample in df_32_robot.iterrows():
|
| 507 |
-
# if int(sample[col]) == 1:
|
| 508 |
-
# source = source_wells_32[i]
|
| 509 |
-
# dest = get_well_position(int(sample['Sample']))
|
| 510 |
-
# vol = round(sample['volume donors (µl)'], 2)
|
| 511 |
-
# tool = 'TS_10' if vol < 10 else 'TS_50'
|
| 512 |
-
# robot_script_32.append({
|
| 513 |
-
# 'Source': source,
|
| 514 |
-
# 'Destination': dest,
|
| 515 |
-
# 'Volume': vol,
|
| 516 |
-
# 'Tool': tool
|
| 517 |
-
# })
|
| 518 |
-
|
| 519 |
-
# robot_script_32, source_volumes_32 = track_and_replace_source(source_wells_32, robot_script_32)
|
| 520 |
-
|
| 521 |
-
# d_script, d_volumes = generate_fixed_d_source_instructions_to_all_samples(len(df_32_robot))
|
| 522 |
-
# full_robot_script = robot_script_32 + d_script
|
| 523 |
-
|
| 524 |
-
# robot_script_32_df = pd.DataFrame(full_robot_script)
|
| 525 |
-
# robot_script_32_df.insert(0, 'Barcode ID', barcode_id_input)
|
| 526 |
-
# robot_script_32_df.insert(1, 'Labware_Source', labware_source_input)
|
| 527 |
-
# robot_script_32_df.insert(3, 'Labware_Destination', labware_dest_input)
|
| 528 |
-
# robot_script_32_df['Name'] = name_input
|
| 529 |
-
# robot_script_32_df = robot_script_32_df[['Barcode ID', 'Labware_Source', 'Source', 'Labware_Destination', 'Destination', 'Volume', 'Tool', 'Name']]
|
| 530 |
-
|
| 531 |
-
# st.dataframe(robot_script_32_df)
|
| 532 |
-
# st.download_button("Download Robot Script", robot_script_32_df.to_csv(index=False), "robot_script.csv", key="download_robot_any")
|
| 533 |
-
|
| 534 |
-
# st.subheader("Total Volume Used Per Source")
|
| 535 |
-
# combined_volumes = {**source_volumes_32, **d_volumes}
|
| 536 |
-
# source_volume_df = pd.DataFrame(list(combined_volumes.items()), columns=['Source', 'Total Volume (µl)'])
|
| 537 |
-
# st.dataframe(source_volume_df)
|
| 538 |
-
# st.download_button("Download Source Volumes", source_volume_df.to_csv(index=False), "source_total_volumes.csv", key="download_volume_any")
|
| 539 |
-
|
| 540 |
-
# Tab 5: Binary → String
|
| 541 |
-
with tab5:
|
| 542 |
-
st.header("Decode Binary Labels to String")
|
| 543 |
-
|
| 544 |
-
# Utility: redirect transfers to an overflow well once a source exceeds its limit
def track_and_replace_source(source_list, robot_script, volume_limit=150):
    """Walk *robot_script* tracking cumulative volume per source well.

    Whenever a well's running total exceeds *volume_limit*, the offending
    transfer is redirected (the entry is mutated in place) to the well four
    rows down (A→E, B→F, ...) and its volume is moved to that well's total.
    Returns (adjusted script entries, per-source volume totals).
    NOTE(review): *source_list* is unused — confirm before removing it.
    """
    totals = {}
    adjusted = []

    for entry in robot_script:
        well = entry['Source']
        vol = entry['Volume']

        totals[well] = totals.get(well, 0) + vol

        if totals[well] > volume_limit:
            # Overflow: shift the row letter down by four (e.g. A1 -> E1).
            overflow_well = f"{chr(ord(well[0]) + 4)}{well[1:]}"
            entry['Source'] = overflow_well

            totals[overflow_well] = totals.get(overflow_well, 0) + vol
            totals[well] -= vol

        adjusted.append(entry)

    return adjusted, totals
|
| 573 |
-
|
| 574 |
-
# Utility: dispense a fixed volume from D-row wells into every sample well
def generate_fixed_d_source_instructions_to_all_samples(n_samples, fixed_volume=16, volume_limit=170):
    """Build transfers of *fixed_volume* µl from D-row wells to all samples.

    Advances to the next D well (D1, D2, ...) whenever the current one
    would exceed *volume_limit*. Volumes above 10 µl are split into two
    TS_10 transfers of roughly half each; smaller volumes go as a single
    TS_10 transfer. Returns (script rows, per-D-well volume totals).
    """
    totals = {}
    script = []
    d_index = 1

    for sample_no in range(1, n_samples + 1):
        dest = get_well_position(sample_no)
        d_well = f"D{d_index}"
        totals.setdefault(d_well, 0)

        # Roll over to a fresh D well before exceeding the limit.
        if totals[d_well] + fixed_volume > volume_limit:
            d_index += 1
            d_well = f"D{d_index}"
            totals[d_well] = 0

        totals[d_well] += fixed_volume

        if fixed_volume > 10:
            # Split into two pipettings so the TS_10 tool can handle it.
            first_half = round(fixed_volume / 2, 2)
            for part in (first_half, fixed_volume - first_half):
                script.append({
                    'Source': d_well,
                    'Destination': dest,
                    'Volume': part,
                    'Tool': 'TS_10',
                })
        else:
            script.append({
                'Source': d_well,
                'Destination': dest,
                'Volume': fixed_volume,
                'Tool': 'TS_10',
            })

    return script, totals
|
| 618 |
-
|
| 619 |
-
def generate_source_wells(n):
|
| 620 |
-
wells = []
|
| 621 |
-
rows = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
|
| 622 |
-
for i in range(n):
|
| 623 |
-
row = rows[i // 12] # cycle through A, B, C...
|
| 624 |
-
col = (i % 12) + 1 # 1 to 12
|
| 625 |
-
wells.append(f"{row}{col}")
|
| 626 |
-
return wells
|
| 627 |
-
|
| 628 |
-
st.subheader("Binary per Row")
|
| 629 |
-
st.write("Upload CSV with any number of columns (0 or 1), no headers, from EF Binary format or enter manually below.")
|
| 630 |
-
|
| 631 |
-
binary32_file = st.file_uploader("Upload Binary CSV", type=["csv"], key="binary_any")
|
| 632 |
-
|
| 633 |
-
st.subheader("Optional Metadata (Optional)")
|
| 634 |
-
barcode_id_input = st.text_input("Barcode ID (applied to all rows, optional)", value="")
|
| 635 |
-
labware_source_input = st.text_input("Labware for Source (optional, default = 1)", value="1")
|
| 636 |
-
labware_dest_input = st.text_input("Labware for Destination (optional, default = 1)", value="1")
|
| 637 |
-
name_input = st.text_input("Name field (optional, default = blank)", value="")
|
| 638 |
-
volume_limit_input = st.number_input("Maximum Volume Per Source Well (µl)", value=150)
|
| 639 |
-
|
| 640 |
-
if binary32_file:
|
| 641 |
-
df_32 = pd.read_csv(binary32_file, header=None)
|
| 642 |
-
df_32.columns = [str(h) for h in range(1, len(df_32.columns)+1)]
|
| 643 |
-
else:
|
| 644 |
-
df_32 = st.data_editor(
|
| 645 |
-
pd.DataFrame(columns=[str(h) for h in range(1, 33)]),
|
| 646 |
-
num_rows="dynamic",
|
| 647 |
-
key="manual_any_input"
|
| 648 |
-
)
|
| 649 |
-
|
| 650 |
-
if not df_32.empty:
|
| 651 |
-
st.subheader("Binary Labels (Uploaded)")
|
| 652 |
-
st.dataframe(df_32.style.applymap(lambda v: "background-color: lightgreen" if v == 1 else "background-color: lightcoral"))
|
| 653 |
-
st.download_button("Download CSV", df_32.to_csv(index=False), "decoded_binary_uploaded.csv", key="download_csv_uploaded")
|
| 654 |
-
|
| 655 |
-
decoded = binary_labels_to_string(df_32.values.flatten().astype(int).tolist())
|
| 656 |
-
st.subheader("Decoded String")
|
| 657 |
-
st.write(decoded)
|
| 658 |
-
st.download_button("Download Concatenated Output", decoded, "decoded_binary_string.txt", key="download_txt_any")
|
| 659 |
-
|
| 660 |
-
st.subheader("Robot Preparation Script from Binary")
|
| 661 |
-
|
| 662 |
-
df_32_robot = df_32.copy()
|
| 663 |
-
df_32_robot.insert(0, 'Sample', range(1, len(df_32_robot)+1))
|
| 664 |
-
df_32_robot['# donors'] = df_32_robot.iloc[:, 1:].astype(int).sum(axis=1)
|
| 665 |
-
df_32_robot['volume donors (µl)'] = 64 / df_32_robot['# donors']
|
| 666 |
-
|
| 667 |
-
robot_script_32 = []
|
| 668 |
-
source_wells_32 = generate_source_wells(df_32.shape[1])
|
| 669 |
-
|
| 670 |
-
for i, col in enumerate(df_32.columns):
|
| 671 |
-
for row_idx, sample in df_32_robot.iterrows():
|
| 672 |
-
if int(sample[col]) == 1:
|
| 673 |
-
source = source_wells_32[i]
|
| 674 |
-
dest = get_well_position(int(sample['Sample']))
|
| 675 |
-
vol = round(sample['volume donors (µl)'], 2)
|
| 676 |
-
|
| 677 |
-
if vol > 10:
|
| 678 |
-
half_vol = round(vol / 2, 2)
|
| 679 |
-
robot_script_32.append({
|
| 680 |
-
'Source': source,
|
| 681 |
-
'Destination': dest,
|
| 682 |
-
'Volume': half_vol,
|
| 683 |
-
'Tool': 'TS_10'
|
| 684 |
-
})
|
| 685 |
-
robot_script_32.append({
|
| 686 |
-
'Source': source,
|
| 687 |
-
'Destination': dest,
|
| 688 |
-
'Volume': vol - half_vol,
|
| 689 |
-
'Tool': 'TS_10'
|
| 690 |
-
})
|
| 691 |
-
else:
|
| 692 |
-
robot_script_32.append({
|
| 693 |
-
'Source': source,
|
| 694 |
-
'Destination': dest,
|
| 695 |
-
'Volume': vol,
|
| 696 |
-
'Tool': 'TS_10'
|
| 697 |
-
})
|
| 698 |
-
|
| 699 |
-
robot_script_32, source_volumes_32 = track_and_replace_source(source_wells_32, robot_script_32, volume_limit=volume_limit_input)
|
| 700 |
-
|
| 701 |
-
d_script, d_volumes = generate_fixed_d_source_instructions_to_all_samples(len(df_32_robot), fixed_volume=16, volume_limit=volume_limit_input)
|
| 702 |
-
full_robot_script = robot_script_32 + d_script
|
| 703 |
-
|
| 704 |
-
robot_script_32_df = pd.DataFrame(full_robot_script)
|
| 705 |
-
robot_script_32_df.insert(0, 'Barcode ID', barcode_id_input)
|
| 706 |
-
robot_script_32_df.insert(1, 'Labware_Source', labware_source_input)
|
| 707 |
-
robot_script_32_df.insert(3, 'Labware_Destination', labware_dest_input)
|
| 708 |
-
robot_script_32_df['Name'] = name_input
|
| 709 |
-
robot_script_32_df = robot_script_32_df[['Barcode ID', 'Labware_Source', 'Source', 'Labware_Destination', 'Destination', 'Volume', 'Tool', 'Name']]
|
| 710 |
-
|
| 711 |
-
st.dataframe(robot_script_32_df)
|
| 712 |
-
st.download_button("Download Robot Script", robot_script_32_df.to_csv(index=False), "robot_script.csv", key="download_robot_any")
|
| 713 |
-
|
| 714 |
-
st.subheader("Total Volume Used Per Source")
|
| 715 |
-
combined_volumes = {**source_volumes_32, **d_volumes}
|
| 716 |
-
source_volume_df = pd.DataFrame(list(combined_volumes.items()), columns=['Source', 'Total Volume (µl)'])
|
| 717 |
-
st.dataframe(source_volume_df)
|
| 718 |
-
st.download_button("Download Source Volumes", source_volume_df.to_csv(index=False), "source_total_volumes.csv", key="download_volume_any")
|
| 719 |
-
|
| 720 |
-
|
| 721 |
-
|
| 722 |
import streamlit as st
|
| 723 |
import pandas as pd
|
|
|
|
| 724 |
|
| 725 |
# === App Title ===
|
| 726 |
-
|
| 727 |
-
|
| 728 |
-
|
| 729 |
# === Voyager ASCII 6-bit conversion table ===
|
| 730 |
voyager_table = {
|
| 731 |
i: ch for i, ch in enumerate([
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
import pandas as pd
|
| 3 |
+
import math
|
| 4 |
|
| 5 |
# === App Title ===
|
| 6 |
+
st.set_page_config(page_title="Robot Script Generator", layout="wide")
|
| 7 |
+
st.title("🧪 Robot Script Generator")
|
| 8 |
+
|
| 9 |
# === Voyager ASCII 6-bit conversion table ===
|
| 10 |
voyager_table = {
|
| 11 |
i: ch for i, ch in enumerate([
|