wenjun99 committed on
Commit
852fe6e
·
verified ·
1 Parent(s): cf08198

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +202 -3
app.py CHANGED
@@ -1,12 +1,15 @@
1
  import streamlit as st
2
  import pandas as pd
3
  import io
 
 
 
4
 
5
  # =========================
6
  # Streamlit App Setup
7
  # =========================
8
  st.set_page_config(page_title="ASCII ↔ Binary Converter", layout="wide")
9
- st.title("🔢 ASCII ↔ Binary Converter")
10
 
11
  # =========================
12
  # Voyager ASCII 6-bit Table
@@ -47,7 +50,7 @@ def binary_labels_to_string(bits: list[int]) -> str:
47
  # =========================
48
  # Tabs
49
  # =========================
50
- tab1, tab2 = st.tabs(["Text → Binary", "Binary → Text"])
51
 
52
  # --------------------------------------------------
53
  # TAB 1: Text → Binary
@@ -63,7 +66,7 @@ with tab1:
63
 
64
  col1, col2 = st.columns([2, 1])
65
  with col1:
66
- group_size = st.slider("Select number of positions per group:", min_value=12, max_value=32, value=25)
67
  with col2:
68
  custom_cols = st.number_input("Or enter custom number:", min_value=1, max_value=128, value=group_size)
69
  if custom_cols != group_size:
@@ -165,3 +168,199 @@ with tab2:
165
  st.error(f"Error reading or converting file: {e}")
166
  else:
167
  st.info("👆 Upload a file to start the reverse conversion.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  import pandas as pd
3
  import io
4
+ import re
5
+ import numpy as np
6
+
7
 
8
  # =========================
9
  # Streamlit App Setup
10
  # =========================
11
  st.set_page_config(page_title="ASCII ↔ Binary Converter", layout="wide")
12
+ st.title("ASCII ↔ Binary Converter")
13
 
14
  # =========================
15
  # Voyager ASCII 6-bit Table
 
50
  # =========================
51
  # Tabs
52
  # =========================
53
+ tab1, tab2, tab3 = st.tabs(["Text → Binary", "Binary → Text", "Robot Script"])
54
 
55
  # --------------------------------------------------
56
  # TAB 1: Text → Binary
 
66
 
67
  col1, col2 = st.columns([2, 1])
68
  with col1:
69
+ group_size = st.slider("Select number of positions per row:", min_value=12, max_value=32, value=25)
70
  with col2:
71
  custom_cols = st.number_input("Or enter custom number:", min_value=1, max_value=128, value=group_size)
72
  if custom_cols != group_size:
 
168
  st.error(f"Error reading or converting file: {e}")
169
  else:
170
  st.info("👆 Upload a file to start the reverse conversion.")
# --------------------------------------------------
# TAB 3: Pipetting Command Generator
# --------------------------------------------------


with tab3:
    st.header("🧪 Pipetting Command Generator")
    st.markdown("""
Upload your sample file (Excel, CSV, or TXT) containing binary mutation data.
The app will:
- Auto-detect or create `Sample`, `Position#`, `Total edited`, and `Volume per "1"` columns
- Generate pipetting commands dynamically for any number of positions
- Display and allow CSV download for both commands and source volume summaries
""")

    uploaded = st.file_uploader("📤 Upload data file", type=["xlsx", "csv", "txt"])
    max_per_well_ul = st.number_input("Maximum volume per source well (µL)", min_value=10.0, max_value=1000.0, value=160.0, step=10.0)

    if uploaded is not None:
        try:
            # --- Load file (format chosen from the uploaded file's extension) ---
            if uploaded.name.endswith(".xlsx"):
                sheet_names = pd.ExcelFile(uploaded).sheet_names
                sheet_choice = st.selectbox("Select sheet:", sheet_names)
                df = pd.read_excel(uploaded, sheet_name=sheet_choice)
            elif uploaded.name.endswith(".csv"):
                df = pd.read_csv(uploaded)
            else:  # TXT — assumed tab-separated
                df = pd.read_csv(uploaded, sep="\t")

            st.success(f"✅ Loaded file with {len(df)} rows and {len(df.columns)} columns")

            # --- Clean column names (stray whitespace breaks the lookups below) ---
            df.columns = [str(c).strip() for c in df.columns]

            # --- Ensure Sample column ---
            # Case-insensitive lookup so an existing "sample" column is reused
            # instead of being shadowed by a generated one.
            sample_col = next((c for c in df.columns if c.lower() == "sample"), None)
            if sample_col is None:
                sample_col = "Sample"
                df.insert(0, sample_col, np.arange(1, len(df) + 1))
                st.info("`Sample` column missing — automatically generated 1..N.")

            # --- Detect Position columns ---
            position_cols = [c for c in df.columns if re.match(r"(?i)^position\s*\d+", c)]
            if not position_cols:
                # If not found, fall back to every column that is not a known
                # metadata column and treat it as a position column.
                non_pos_cols = {"sample", "total edited", 'volume per "1"', "volume per 1"}
                candidate_cols = [c for c in df.columns if c.lower() not in non_pos_cols]
                if not candidate_cols:
                    st.error("❌ Could not detect any Position columns.")
                    st.stop()
                position_cols = candidate_cols
                st.info(f"Position columns inferred automatically: {len(position_cols)} detected.")

            # --- Ensure Total edited ---
            # Case-insensitive, consistent with the Sample lookup: a pre-existing
            # "total edited" column is reused rather than duplicated.
            total_col = next((c for c in df.columns if c.lower() == "total edited"), None)
            if total_col is None:
                total_col = "Total edited"
                # Non-numeric cells count as 0 so a stray blank doesn't crash the sum.
                df[total_col] = df[position_cols].apply(pd.to_numeric, errors="coerce").fillna(0).sum(axis=1).astype(int)
                st.info("`Total edited` column missing — calculated automatically as sum of 1s per row.")

            # --- Ensure Volume per "1" ---
            vol_candidates = [c for c in df.columns if "volume per" in c.lower()]
            if not vol_candidates:
                # 64 µL total per sample, split evenly over its edited positions.
                # Rows with zero edits get 0 µL instead of a division-by-zero inf.
                df['Volume per "1"'] = 64 / df[total_col].replace(0, np.nan)
                df['Volume per "1"'] = df['Volume per "1"'].fillna(0)
                st.info('`Volume per "1"` column missing — calculated automatically as 64 / Total edited.')
                volume_col = 'Volume per "1"'
            else:
                volume_col = vol_candidates[0]

            # --- Constants (96-well plate geometry) ---
            ROWS_96 = ["A", "B", "C", "D", "E", "F", "G", "H"]
            COLS_96 = list(range(1, 13))
            # Each source occupies a group of 4 consecutive columns; 3 groups per row.
            GROUP_START_COLS = [1, 5, 9]
            num_positions = len(position_cols)

            def well_name(row_letter, col_number):
                """Format a plate coordinate like 'A1'."""
                return f"{row_letter}{col_number}"

            def sample_index_to_plate_and_well(sample_idx):
                """Map a 1-based sample index to (destination plate number, well name), row-major, 96 wells per plate."""
                plate_num = ((sample_idx - 1) // 96) + 1
                within_plate = (sample_idx - 1) % 96
                row_idx = within_plate // 12
                col_idx = within_plate % 12
                return plate_num, well_name(ROWS_96[row_idx], COLS_96[col_idx])

            def source_index_to_group_coords(source_idx):
                """Map a 1-based source index to (plate, row letter, start column) of its 4-well group; 24 groups per plate."""
                group_idx = source_idx - 1
                plate_num = 1 + (group_idx // 24)
                within_plate_group = group_idx % 24
                row_idx = within_plate_group // 3
                group_in_row = within_plate_group % 3
                row_letter = ROWS_96[row_idx]
                start_col = GROUP_START_COLS[group_in_row]
                return plate_num, row_letter, start_col

            def source_index_to_wells(source_idx):
                """Return the 4 (plate, well) replicate wells backing one source index."""
                plate_num, row_letter, start_col = source_index_to_group_coords(source_idx)
                return [(plate_num, well_name(row_letter, start_col + offset)) for offset in range(4)]

            def pick_tool(volume_ul):
                """Choose the pipetting tool by transfer volume (TS_10 up to 10 µL, TS_50 above)."""
                return "TS_10" if volume_ul <= 10.0 else "TS_50"

            # --- Core logic ---
            commands = []
            source_volume_totals = {}
            # Cumulative µL already drawn from each of a source's 4 wells.
            per_source_well_cum = {i: [0.0, 0.0, 0.0, 0.0] for i in range(1, num_positions + 1)}
            per_source_wells = {i: source_index_to_wells(i) for i in range(1, num_positions + 1)}

            for _, row in df.iterrows():
                sample_id = int(row[sample_col])
                vol_per_one = float(row[volume_col])
                if vol_per_one > max_per_well_ul:
                    st.warning(f"Volume per '1' ({vol_per_one} µL) exceeds per-well cap of {max_per_well_ul} µL for sample {sample_id}.")
                dest_plate, dest_well = sample_index_to_plate_and_well(sample_id)
                tool = pick_tool(vol_per_one)

                for pos_idx, col in enumerate(position_cols, 1):
                    val = row[col]
                    try:
                        is_one = (float(val) == 1.0)
                    except (TypeError, ValueError):
                        # Non-numeric cell: fall back to a string comparison.
                        is_one = str(val).strip() == "1"
                    if not is_one:
                        continue

                    wells_list = per_source_wells[pos_idx]
                    cum_list = per_source_well_cum[pos_idx]

                    # First-fit: use the first replicate well that still has capacity.
                    chosen = None
                    for w_i, ((plate_num, well_str), current_vol) in enumerate(zip(wells_list, cum_list)):
                        if current_vol + vol_per_one <= max_per_well_ul:
                            chosen = (w_i, plate_num, well_str)
                            break
                    if chosen is None:
                        st.error(f"Source {pos_idx} has all 4 wells full. Cannot add {vol_per_one} µL.")
                        st.stop()

                    w_i, src_plate, src_well = chosen
                    # cum_list aliases the dict entry, so mutating it updates the state in place.
                    cum_list[w_i] += vol_per_one
                    source_volume_totals[(src_plate, src_well)] = source_volume_totals.get((src_plate, src_well), 0.0) + vol_per_one

                    commands.append({
                        "SourceIdx": pos_idx,
                        "Source plate": src_plate,
                        "Source well": src_well,
                        "Destination plate": dest_plate,
                        "Destination well": dest_well,
                        "Volume": round(vol_per_one, 2),
                        "Tool": tool
                    })

            # --- Compile results ---
            output_cols = ["Source plate", "Source well", "Destination plate", "Destination well", "Volume", "Tool"]
            if commands:
                commands_df = pd.DataFrame(commands)
                commands_df = commands_df.sort_values(
                    by=["SourceIdx", "Source plate", "Source well", "Destination plate", "Destination well"],
                    kind="stable"
                )
                commands_df = commands_df[output_cols]
            else:
                # No '1' entries anywhere: sort_values on a column-less frame
                # would raise KeyError, so build an empty, correctly-shaped frame.
                commands_df = pd.DataFrame(columns=output_cols)

            # --- Source summary ---
            summary_rows = []
            for src_idx in range(1, num_positions + 1):
                for plate_num, well_str in per_source_wells[src_idx]:
                    total = source_volume_totals.get((plate_num, well_str), 0.0)
                    summary_rows.append({
                        "Source": src_idx,
                        "Source plate": plate_num,
                        "Source well": well_str,
                        "Total volume taken (µL)": round(total, 2)
                    })
            summary_df = pd.DataFrame(summary_rows)

            st.success(f"✅ Generated {len(commands_df)} commands across {num_positions} positions.")

            st.markdown("### 💧 Pipetting Commands")
            st.dataframe(commands_df, use_container_width=True, height=400)
            st.download_button(
                "⬇️ Download Commands CSV",
                commands_df.to_csv(index=False),
                "pipetting_commands.csv",
                mime="text/csv"
            )

            st.markdown("### 📊 Source Volume Summary")
            st.dataframe(summary_df, use_container_width=True, height=400)
            st.download_button(
                "⬇️ Download Source Summary CSV",
                summary_df.to_csv(index=False),
                "source_volume_summary.csv",
                mime="text/csv"
            )

        except Exception as e:
            # Top-level UI boundary: surface any failure to the user instead of
            # crashing the Streamlit session.
            st.error(f"❌ Error processing file: {e}")
    else:
        st.info("👆 Upload an Excel/CSV/TXT file to start generating pipetting commands.")