# Great Lens — Streamlit app that analyzes uploaded Jupyter notebooks for
# execution success, grammar/style quality, and factual accuracy.
import streamlit as st
import os
from setup import page_setup
from utilities import save_notebook, save_datasets
from grammar_exec import execute
from notebook import main_call
import pandas as pd
from pathlib import Path
# --- Page header -------------------------------------------------------------
# Three columns are used purely for layout: the title sits in the wide middle
# column so it appears roughly centered on the page.
col1, col2, col3 = st.columns([2, 4, 1])
with col2:
    st.title(":blue[Great] Lens 🕵️‍♂️")

# --- File upload section -----------------------------------------------------
st.subheader("📁 Upload Files")
# NOTE: col1/col2 are deliberately rebound here for the two upload widgets.
col1, col2 = st.columns(2)

with col1:
    st.markdown("**Upload Notebook**")
    # Single .ipynb file to analyze.
    notebook = st.file_uploader(
        label='Select Jupyter Notebook',
        accept_multiple_files=False,
        type=['ipynb'],
        help="Upload a .ipynb file to analyze"
    )

with col2:
    st.markdown("**Upload Datasets (Optional)**")
    # Any data files the notebook references; saved next to the notebook so
    # relative paths inside the notebook keep working.
    datasets = st.file_uploader(
        label='Select Dataset Files',
        accept_multiple_files=True,
        type=['csv', 'xlsx', 'xls', 'json', 'txt', 'parquet'],
        help="Upload datasets that your notebook references"
    )

# --- Display uploaded files info and persist them to /tmp/Notebook -----------
if datasets:
    st.info(f"📊 {len(datasets)} dataset(s) uploaded: {', '.join([f.name for f in datasets])}")

if notebook:
    st.success(f"📓 Notebook uploaded: {notebook.name}")
    # Save files to /tmp/Notebook (helpers from utilities.py). This runs on
    # every Streamlit rerun while a notebook is selected; the helpers are
    # presumably idempotent overwrites — TODO confirm.
    save_notebook(notebook)
    if datasets:
        save_datasets(datasets)
        st.info("✅ Datasets saved to notebook directory")
# Two result tabs: notebook execution results and grammar/fact analysis.
results_tab, grammar_tab = st.tabs(['Execution', 'Grammar/Fact'])

# NOTE(review): the Execution tab rendering below is intentionally commented
# out (the tab is currently left empty). Kept for reference until execution
# via notebook.main_call is re-enabled.
# with results_tab:
#     with st.spinner("🔄 Executing notebook..."):
#         try:
#             notebook_dir_path = Path("/tmp/Notebook")
#             notebook_files = [f for f in notebook_dir_path.iterdir() if f.suffix == '.ipynb']
#             if not notebook_files:
#                 st.error("No notebook found in directory")
#             else:
#                 notebook_path = notebook_files[0]
#                 st.write(f'📓 Processing notebook: {notebook_path.name}')
#                 # Show available datasets
#                 dataset_files = [f for f in notebook_dir_path.iterdir()
#                                  if f.suffix.lower() in ['.csv', '.xlsx', '.xls', '.json', '.txt', '.parquet']]
#                 if dataset_files:
#                     st.info(f"📊 Available datasets: {', '.join([f.name for f in dataset_files])}")
#                 results = main_call(notebook_path)
#                 # Display results in a more user-friendly way
#                 if isinstance(results, dict):
#                     col1, col2 = st.columns([1, 3])
#                     with col1:
#                         if results['status'] == 'Pass':
#                             st.success("✅ **Status: PASSED**")
#                         else:
#                             st.error("❌ **Status: FAILED**")
#                     with col2:
#                         st.write(f"**Notebook:** {results['notebook']}")
#                         if results['error_message']:
#                             st.error(f"**Error:** {results['error_message']}")
#                 else:
#                     st.dataframe(results)
#         except Exception as e:
#             st.error(f"❌ Error processing notebook: {str(e)}")
with grammar_tab:
    # Run the grammar/fact analysis over the saved notebook directory and
    # render the findings. `execute` (grammar_exec.py) is expected to return
    # a pandas DataFrame with Grammar_Text / Grammar_Suggestions /
    # 'Is Grammar Error?' / Fact_Text / Fact_Suggestions columns —
    # TODO confirm against grammar_exec.execute.
    try:
        with st.spinner("🔍 Analyzing grammar and facts..."):
            results = execute("/tmp/Notebook")

        if not results.empty:
            # --- Grammar results in a readable format ------------------------
            st.subheader("📝 Grammar & Style Analysis")
            if 'Grammar_Text' in results.columns and results['Grammar_Text'].notna().any():
                grammar_issues = results[results['Grammar_Text'].notna()]
                for _, row in grammar_issues.iterrows():
                    if row['Is Grammar Error?']:
                        # Hard grammar error: warn plus the tool's suggestion.
                        st.warning(f"**Grammar Error:** {row['Grammar_Text']}")
                        st.info(f"**Suggestion:** {row['Grammar_Suggestions']}")
                    else:
                        # Soft style issue: informational plus improvement.
                        st.info(f"**Style Suggestion:** {row['Grammar_Text']}")
                        st.success(f"**Improvement:** {row['Grammar_Suggestions']}")
                    st.divider()

            # --- Factual accuracy results ------------------------------------
            st.subheader("🎯 Factual Accuracy Analysis")
            if 'Fact_Text' in results.columns and results['Fact_Text'].notna().any():
                fact_issues = results[results['Fact_Text'].notna()]
                for _, row in fact_issues.iterrows():
                    st.error(f"**Factual Error:** {row['Fact_Text']}")
                    st.success(f"**Correction:** {row['Fact_Suggestions']}")
                    st.divider()

            # Show raw dataframe as well, collapsed by default.
            with st.expander("📋 View Raw Results"):
                st.dataframe(results)
        else:
            st.success("✅ No grammar or factual issues found!")
    except Exception as e:
        # Surface any analysis failure in the UI rather than crashing the app.
        st.error(f"❌ Unable to process grammar/facts: {str(e)}")
# --- Sidebar: usage instructions and supported formats -----------------------
st.sidebar.markdown("## 💡 How to Use")
st.sidebar.markdown("""
1. **Upload Notebook**: Select your .ipynb file
2. **Upload Datasets**: Add any CSV, Excel, or other data files your notebook uses
3. **Execution Tab**: See if your notebook runs successfully
4. **Grammar/Fact Tab**: Check for text quality and factual accuracy

### 🔧 Colab Support
The tool automatically handles Google Colab specific code:
- Replaces Drive mounts with local file access
- Uses your uploaded datasets instead of Colab file uploads
- Skips Colab-specific imports that won't work locally
""")

st.sidebar.markdown("## 📋 Supported Formats")
st.sidebar.markdown("""
**Notebooks:** .ipynb
**Datasets:** .csv, .xlsx, .xls, .json, .txt, .parquet
""")