File size: 4,860 Bytes
ec478de
 
 
 
 
 
 
2e0982a
386acb3
 
 
ec478de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
386acb3
ec478de
386acb3
ec478de
 
 
386acb3
ec478de
 
 
 
386acb3
ec478de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2e0982a
386acb3
ec478de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import os
import subprocess
import threading
import time

from pyngrok import ngrok

# ----------------- Streamlit Dashboard Code ------------------
# Source text of the Streamlit app. It is kept as a string so this launcher
# can run in a notebook environment, write the app to disk, and serve it
# through ngrok below.
dashboard_code = """
import streamlit as st
import pandas as pd
import io

# Helper to normalize a college name so the two files can be joined on it.
def normalize_name(name):
    if pd.isna(name):
        return ""
    # str() guards against non-string cells (e.g. numeric codes) in the
    # name column, which would otherwise crash .replace().
    return str(name).replace("(B)", "B").strip().lower()

# Transformation Logic
def transform_file(main_df, attendance_df):
    # Normalize college names in both DataFrames with the SAME helper, so
    # names containing "(B)" match on both sides.
    main_df['normalized_name'] = main_df.iloc[:, 2].apply(normalize_name)
    attendance_df['normalized_name'] = attendance_df.iloc[:, 2].apply(normalize_name)

    # Create attendance lookup; drop duplicate names first so .loc always
    # yields a single Series (a duplicated index would return a DataFrame
    # and break the positional row.iloc access below).
    attendance_lookup = attendance_df.drop_duplicates(subset='normalized_name').set_index('normalized_name')

    school_attendance_list = []
    inter_attendance_list = []

    for name in main_df['normalized_name']:
        if name in attendance_lookup.index:
            row = attendance_lookup.loc[name]
            school_attendance_list.append(row.iloc[6])  # 7th column
            inter_attendance_list.append(row.iloc[10]) # 11th column
        else:
            # No attendance record for this college.
            school_attendance_list.append(None)
            inter_attendance_list.append(None)

    # Construct final dataframe. Column positions below assume the main
    # file's fixed layout (sanction/admitted pairs for school and inter).
    new_df = pd.DataFrame({
        'serial_no': main_df.iloc[:, 0],
        'district': main_df.iloc[:, 1],
        'college_name': main_df.iloc[:, 2],
        'class': 'School',
        'school_minority_sanction': main_df.iloc[:, 3],
        'school_minority_admitted': main_df.iloc[:, 4],
        'school_minority_vacancies': main_df.iloc[:, 3] - main_df.iloc[:, 4],
        'school_non_minority_sanction': main_df.iloc[:, 5],
        'school_non_minority_admitted': main_df.iloc[:, 6],
        'school_non_minority_vacancies': main_df.iloc[:, 5] - main_df.iloc[:, 6],
        'school_attendance': school_attendance_list,
        'school_attendance_percentage': '',  # Placeholder
        'class_grade': 'Intermediate',
        'inter_minority_sanction': main_df.iloc[:, 8],
        'inter_minority_admitted': main_df.iloc[:, 9],
        'inter_minority_vacancies': main_df.iloc[:, 8] - main_df.iloc[:, 9],
        'inter_non_minority_sanction': main_df.iloc[:, 10],
        'inter_non_minority_admitted': main_df.iloc[:, 11],
        'inter_non_minority_vacancies': main_df.iloc[:, 10] - main_df.iloc[:, 11],
        'inter_attendance': inter_attendance_list,
        'inter_attendance_percentage': ''  # Placeholder
    })

    return new_df

# Streamlit App
st.set_page_config(layout="wide", page_title="πŸ“ File Format Converter")
st.title("πŸ“€ Upload Two Files to Convert Format")

uploaded_main = st.file_uploader("Upload Main File (Sanctions, Admissions)", type=["csv", "xlsx"], key="main")
uploaded_attendance = st.file_uploader("Upload Attendance File", type=["csv", "xlsx"], key="att")

if uploaded_main and uploaded_attendance:
    try:
        # Determine extension and read both files
        ext1 = uploaded_main.name.split('.')[-1].lower()
        ext2 = uploaded_attendance.name.split('.')[-1].lower()

        # Main file carries a 4-row header banner before the real columns.
        if ext1 == 'csv':
            main_df = pd.read_csv(uploaded_main, skiprows=4)
        else:
            main_df = pd.read_excel(uploaded_main, skiprows=4)

        if ext2 == 'csv':
            attendance_df = pd.read_csv(uploaded_attendance)
        else:
            attendance_df = pd.read_excel(uploaded_attendance)

        st.success("βœ… Both files uploaded and read successfully!")

        new_df = transform_file(main_df, attendance_df)
        st.dataframe(new_df.head())

        output = io.BytesIO()
        with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
            new_df.to_excel(writer, index=False, sheet_name='Sheet1')
            worksheet = writer.sheets['Sheet1']

            # Set wider column widths (20 characters)
            for i, col in enumerate(new_df.columns):
                worksheet.set_column(i, i, 20)

        st.download_button("πŸ“₯ Download Converted Excel File", output.getvalue(), file_name="converted_output.xlsx")

    except Exception as e:
        st.error(f"❌ Error while processing files: {e}")
"""

# ----------------- Save Streamlit Code -----------------------
# Explicit UTF-8: dashboard_code contains emoji, which would crash the write
# under a non-UTF-8 platform default encoding (e.g. cp1252 on Windows).
with open("app_file_filter.py", "w", encoding="utf-8") as f:
    f.write(dashboard_code)

# ----------------- Ngrok Setup & Streamlit Launch ------------------
# SECURITY NOTE(review): the auth token below is hard-coded in source and is
# now effectively public -- it should be revoked and a fresh token supplied
# via the NGROK_AUTH_TOKEN environment variable. The literal is kept only as
# a backward-compatible fallback so existing runs keep working.
ngrok.set_auth_token(os.environ.get("NGROK_AUTH_TOKEN", "30VAe4T9qTjFG7urJrdYiwizPYO_3BVhHNvCaLGXkWDVAtnmu"))

# Function to run Streamlit; blocks for the lifetime of the server, so it is
# started on a background thread below.
def run():
    # Argument list + no shell: avoids shell interpretation of the command
    # (safer and more portable than os.system with a shell string).
    subprocess.run(["streamlit", "run", "app_file_filter.py"], check=False)

# Start Streamlit in background thread so this script can continue to set up
# the ngrok tunnel while the server runs.
thread = threading.Thread(target=run)
thread.start()

# Give Streamlit time to boot
# NOTE(review): a fixed 5s sleep is a race -- on a slow machine the server may
# not be listening yet when the tunnel opens; consider polling the port.
time.sleep(5)

# Close existing tunnels if any, so repeated runs don't accumulate tunnels.
for tunnel in ngrok.get_tunnels():
    ngrok.disconnect(tunnel.public_url)

# Open a new ngrok tunnel to Streamlit's default port (8501) and print the
# public URL for the user to open.
public_url = ngrok.connect(addr=8501)
print("πŸ”— Public URL:", public_url)