import marimo

__generated_with = "0.18.0"
app = marimo.App(width="medium")

@app.cell
def _():
    import marimo as mo
    import altair as alt
    import pandas as pd
    import re
    return alt, mo, pd, re

@app.cell
def _(pd, re):
    # 1. Load and Prepare Data with Filename Parsing
    def load_data():
        # Primary location of the dataset
        try:
            df = pd.read_csv('data/eb1_gemma_dataset_2020_plus.csv')
        except FileNotFoundError:
            # Fallback for local testing if the 'data/' folder isn't present
            df = pd.read_csv('eb1_gemma_dataset_2020_plus.csv')

        # 2. Define the criteria columns to clean
        criteria_cols = [
            'prizes', 'memberships', 'published_material', 'judging',
            'original_contributions', 'scholarly_articles', 'artistic_exhibitions',
            'leading_critical_role', 'high_salary', 'commercial_success',
            'comparable_evidence', 'final_merits_determination'
        ]

        # 3. Define allowed values
        allowed = ['Met', 'Unmet', 'Not Discussed']

        # 4. Apply cleaning: any value outside the allowed set becomes 'Unmet'
        for col in criteria_cols:
            if col in df.columns:
                # Strip whitespace and normalize to title case (e.g. 'met' -> 'Met')
                df[col] = df[col].astype(str).str.strip().str.title()
                # Enforce the rule: Met, Unmet, or Not Discussed.
                # Anything else (NaN, 'Unknown', 'Not Reached') becomes 'Unmet'.
                df[col] = df[col].apply(lambda x: x if x in allowed else 'Unmet')

        def parse_filename_date(filename):
            # The regex looks for 3 letters (month), 2 digits (day), and
            # 4 digits (year). Example: AUG202021 -> AUG 20 2021
            match = re.search(r'([A-Z]{3})(\d{2})(\d{4})', str(filename))
            if match:
                month_str, day, year = match.groups()
                date_str = f"{month_str} {day} {year}"
                return pd.to_datetime(date_str, format='%b %d %Y', errors='coerce')
            return pd.NaT

        # Apply transformations
        df['date'] = df['filename'].apply(parse_filename_date)
        df['year'] = df['date'].dt.year.fillna(0).astype(int)

        # Sort by date, newest first, so recent decisions surface at the top
        df = df.sort_values('date', ascending=False)
        return df

    df = load_data()
    return (df,)

@app.cell
def _(df, mo):
    # 2. Filtering UI
    # Exclude year 0 (unparseable dates) from the slider range
    valid_years = df[df['year'] > 0]['year']

    year_slider = mo.ui.range_slider(
        start=int(valid_years.min()),
        stop=int(valid_years.max()),
        step=1,
        value=[int(valid_years.min()), int(valid_years.max())],
        label="Select Decision Year Range"
    )

    field_search = mo.ui.text(
        label="Search by Field of Endeavor",
        placeholder="e.g. Data (press Enter)"
    )
    return field_search, year_slider

@app.cell
def _(alt, df, field_search, mo, pd, year_slider):
    # 3. Reactive Logic & Chart Function
    # Filter the dataframe based on the slider and search values
    filtered_df = df[
        (df['year'] >= year_slider.value[0]) &
        (df['year'] <= year_slider.value[1]) &
        (df['field'].str.contains(field_search.value, case=False, na=False))
    ]

    def create_met_percentage_chart(data):
        if data.empty:
            return mo.md("### ⚠️ No data found for this selection.")

        criteria_cols = [
            'prizes', 'memberships', 'published_material',
            'judging', 'original_contributions', 'scholarly_articles',
            'artistic_exhibitions', 'leading_critical_role', 'high_salary',
            'commercial_success'
        ]

        stats = []
        for col in criteria_cols:
            if col in data.columns:
                met_count = data[col].astype(str).str.strip().str.lower().eq('met').sum()
                unmet_count = data[col].astype(str).str.strip().str.lower().eq('unmet').sum()
                evaluated_count = met_count + unmet_count
                percent_met = (met_count / evaluated_count) * 100 if evaluated_count > 0 else 0
                stats.append({
                    'Criterion': col.replace('_', ' ').title(),
                    'Met %': round(percent_met, 1),
                    'Count': int(met_count),
                    'Evaluated': int(evaluated_count)
                })

        stats_df = pd.DataFrame(stats).sort_values(by='Met %', ascending=False)

        # Simple single-layer bar chart (no layering, no base)
        chart = alt.Chart(stats_df).mark_bar().encode(
            x=alt.X('Met %:Q', scale=alt.Scale(domain=[0, 105])),
            y=alt.Y('Criterion:N', sort='-x'),
            tooltip=['Criterion', 'Met %', 'Count', 'Evaluated']
        ).properties(
            width='container',
            height=300
        )
        return mo.ui.altair_chart(chart)
    return create_met_percentage_chart, filtered_df

@app.cell
def _(create_met_percentage_chart, field_search, filtered_df, mo, year_slider):
    # 4. Final Dashboard View
    mo.md(
        f"""
        # EB-1A AAO Case Explorer

        {mo.hstack([year_slider, field_search], justify="start", gap=2)}

        {create_met_percentage_chart(filtered_df)}

        *Showing {len(filtered_df)} cases for the selected period.*

        {mo.ui.table(
            filtered_df[['date', 'final_decision', 'field', 'prizes', 'judging',
                         'scholarly_articles', 'leading_critical_role', 'high_salary',
                         'memberships', 'published_material', 'original_contributions',
                         'artistic_exhibitions', 'commercial_success',
                         'denial_reason', 'filename']],
            pagination=True,
            show_download=False
        )}
        """
    )
    return

@app.cell
def _(mo):
    mo.md(r"""
    # FAQ
    """)
    return

@app.cell
def _(mo):
    mo.accordion(
        {
            "What is this tool?": mo.md(
                rf"""
                This dashboard is an interactive explorer for USCIS Administrative Appeals Office (AAO) decisions regarding EB-1A (Extraordinary Ability) petitions. It allows users to visualize which legal criteria (such as original contributions or scholarly articles) are most frequently "Met" or "Unmet" across different fields of endeavor and time periods.
                """
            )
        }
    )
    return

@app.cell
def _(mo):
    mo.accordion(
        {
            "Where does the data come from?": mo.md(
                rf"""
                The raw data is sourced from the [MasterControlAIML/EB1-AAO-Decisions](https://huggingface.co/datasets/MasterControlAIML/EB1-AAO-Decisions) dataset on Hugging Face. This repository contains thousands of publicly available legal decisions issued by the USCIS.
                """
            )
        }
    )
    return

@app.cell
def _(mo):
    mo.accordion(
        {
            "How was the dataset processed?": mo.md(
                rf"""
                - We use _pdfplumber_ to extract machine-readable text from the raw decision PDFs. At this stage, files are filtered by year (2020+) based on the filename metadata.
                - The extracted text is then sent to _Gemma 3 (27B)_ via the Google GenAI API. The model acts as a "virtual paralegal," reading the complex legal prose and outputting a structured JSON object for each case. A minimal sketch of this step is shown below.
                """
            )
        }
    )
    return
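
@app.cell
def _(mo):
    mo.md(
        r"""
        Below is a minimal sketch of that extraction step. It assumes the
        `google-genai` client library; the prompt wording, key list, and JSON
        handling are illustrative, not the exact pipeline used to build the
        dataset.

        ```python
        import json

        import pdfplumber
        from google import genai

        def extract_case(pdf_path: str, client: genai.Client) -> dict:
            # Pull the raw text out of the decision PDF
            with pdfplumber.open(pdf_path) as pdf:
                text = "\n".join(page.extract_text() or "" for page in pdf.pages)
            # Ask the model to grade each claimed criterion
            prompt = (
                "Read this EB-1A AAO decision and return a JSON object with keys "
                "such as final_decision, field, prizes, judging, scholarly_articles. "
                "Each criterion must be 'Met', 'Unmet', or 'Not Discussed'.\n\n"
            ) + text
            response = client.models.generate_content(
                model="gemma-3-27b-it", contents=prompt
            )
            return json.loads(response.text)
        ```
        """
    )
    return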

@app.cell
def _(mo):
    mo.accordion(
        {
            "What do the 'Met' and 'Unmet' values mean?": mo.md(
                rf"""
                - __Met__: The AAO adjudicator agreed that the evidence provided satisfied that specific regulatory criterion.
                - __Unmet__: The adjudicator found the evidence insufficient or the petitioner failed to argue that specific criterion successfully.
                - __Not Discussed__: The specific criterion was not a focus of that particular appeal (often because the petitioner did not claim it).
                """
            )
        }
    )
    return
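
@app.cell
def _(mo):
    mo.md(
        r"""
        For illustration, a single cleaned record looks roughly like this
        (all values are hypothetical, not taken from the dataset):

        ```python
        {
            "filename": "AUG202021_01B2203.pdf",    # hypothetical filename
            "date": "2021-08-20",                   # parsed from the filename
            "final_decision": "Dismissed",
            "field": "Data Science",
            "prizes": "Unmet",
            "judging": "Met",
            "scholarly_articles": "Met",
            "artistic_exhibitions": "Not Discussed",
        }
        ```
        """
    )
    return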

if __name__ == "__main__":
    app.run()