Update app.py
app.py CHANGED
@@ -15,7 +15,6 @@ HF_API_KEY = os.getenv("HF_API_KEY")
 # ✅ Initialize Hugging Face API Client
 client = InferenceClient(token=HF_API_KEY)
 
-
 # ✅ Load Local Model with Device Optimization
 MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.2"
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -99,3 +98,20 @@ if st.button("Ask AI via API"):
 if st.button("Ask AI via Local Model"):
     ai_response = ask_mistral_local(user_query)
     st.write("🧠 **Mistral AI Suggests:**", ai_response)
+
+# ✅ Auto-Schedule Feature
+st.markdown("## 📅 Auto-Schedule Missing Timetable Slots")
+selected_file = st.selectbox("Choose a timetable file to auto-fill missing slots:", list(uploaded_files.keys()))
+
+if st.button("Auto-Schedule"):
+    result = auto_schedule(uploaded_files[selected_file])
+    st.write("✅", result)
+
+# ✅ Display Uploaded Timetables
+st.markdown("## 📂 View Uploaded Timetables")
+
+for name, file in uploaded_files.items():
+    if file:
+        df = pd.read_excel(file)
+        st.markdown(f"### {name}")
+        st.dataframe(df)
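
Note on the first hunk: it selects a device for the local model, but the code that actually loads the model and defines ask_mistral_local sits outside the changed lines. A minimal sketch of how that loader could look, assuming the standard transformers API; the function body below is an assumption for illustration, not the app's actual code.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.2"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed loading step: half precision on GPU, full precision on CPU.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

def ask_mistral_local(prompt: str) -> str:
    # Hypothetical helper matching the name used in the diff.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)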
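The second hunk's header also shows an "Ask AI via API" button backed by the InferenceClient initialized in the first hunk. A hedged sketch of what that path might look like; only client = InferenceClient(token=HF_API_KEY) comes from the diff, while ask_mistral_api, the model argument, and the generation parameters are assumptions.

import os
from huggingface_hub import InferenceClient

HF_API_KEY = os.getenv("HF_API_KEY")
client = InferenceClient(token=HF_API_KEY)

def ask_mistral_api(prompt: str) -> str:
    # Hypothetical counterpart to ask_mistral_local; the real helper
    # behind the "Ask AI via API" button is not visible in this diff.
    return client.text_generation(
        prompt,
        model="mistralai/Mistral-7B-Instruct-v0.2",
        max_new_tokens=256,
    )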
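The new Auto-Schedule block calls auto_schedule(uploaded_files[selected_file]), a function defined elsewhere in app.py and not shown in this diff. A purely hypothetical sketch of such a helper, assuming it fills empty cells in the uploaded Excel timetable with a placeholder; the app's real scheduling logic may be entirely different.

import pandas as pd

def auto_schedule(file) -> str:
    # Hypothetical implementation: count the empty timetable cells,
    # fill them with a placeholder entry, and report what was done.
    df = pd.read_excel(file)
    missing = int(df.isna().sum().sum())
    df = df.fillna("Free Period")
    return f"Filled {missing} empty slot(s)."

Returning a status string matches how the diff uses the result (st.write("✅", result)).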