Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
|
@@ -37,9 +37,9 @@ def display_app_header():
|
|
| 37 |
# Add a description of the app
|
| 38 |
st.markdown("""This app allows you to generate EAD/XML archival descriptions. See this series of blog posts for explanations :
|
| 39 |
|
| 40 |
-
- https://iaetbibliotheques.fr/2024/11/comment-apprendre-lead-a-un-llm
|
| 41 |
-
- https://iaetbibliotheques.fr/2024/11/comment-apprendre-lead-a-un-llm-rag-23
|
| 42 |
-
- https://iaetbibliotheques.fr/2024/12/comment-apprendre-lead-a-un-llm-fine-tuning-33
|
| 43 |
""")
|
| 44 |
st.markdown("---")
|
| 45 |
|
|
@@ -48,7 +48,7 @@ history_col, main_col = setup_page()
|
|
| 48 |
|
| 49 |
def setup_sidebar():
|
| 50 |
groq_models = ["llama3-70b-8192", "llama-3.1-70b-versatile","llama3-8b-8192", "llama-3.1-8b-instant", "mixtral-8x7b-32768","gemma2-9b-it", "gemma-7b-it"]
|
| 51 |
-
selected_groq_models = st.sidebar.radio("Choose a
|
| 52 |
return selected_groq_models
|
| 53 |
|
| 54 |
def create_groq_llm(model):
|
|
@@ -193,25 +193,6 @@ def setup_rag_tab(llm):
|
|
| 193 |
|
| 194 |
return chain
|
| 195 |
|
| 196 |
-
"""def setup_local_fine_tuned_tab(query):
|
| 197 |
-
st.header("Fine-tuned Zephir model")
|
| 198 |
-
|
| 199 |
-
llm = Llama(
|
| 200 |
-
model_path="assets/FineZephir-sft-instruct-ead-Q5_K_M.gguf",
|
| 201 |
-
n_ctx=1024,
|
| 202 |
-
verbose=False
|
| 203 |
-
)
|
| 204 |
-
output = llm.create_chat_completion(
|
| 205 |
-
messages = [
|
| 206 |
-
{"role": "system", "content": "You are an archivist expert in EAD format."},
|
| 207 |
-
{
|
| 208 |
-
"role": "user",
|
| 209 |
-
"content": query
|
| 210 |
-
}
|
| 211 |
-
]
|
| 212 |
-
)
|
| 213 |
-
return output["choices"][0]["message"]["content"]"""
|
| 214 |
-
|
| 215 |
def setup_fine_tuned_tab():
|
| 216 |
st.header("Fine-tuned Zephir model")
|
| 217 |
|
|
@@ -305,17 +286,6 @@ with main_col:
|
|
| 305 |
st.markdown(st.session_state.response_rag)
|
| 306 |
|
| 307 |
# Process for Tab 4 - Fine-tuned model
|
| 308 |
-
"""with tab4:
|
| 309 |
-
st.session_state.response_fine_tuned = set_up_fine_tuned_tab(query)
|
| 310 |
-
soup = BeautifulSoup(st.session_state.response_fine_tuned, "lxml-xml")
|
| 311 |
-
with st.chat_message("assistant"):
|
| 312 |
-
st.code(soup.prettify(), language="xml-doc")"""
|
| 313 |
-
"""with tab4:
|
| 314 |
-
llm = create_aws_ollama_llm()
|
| 315 |
-
fine_tuned_chain = setup_fine-tuned_tab(llm)
|
| 316 |
-
st.session_state.response_fine_tuned = fine_tuned_chain.invoke(query)
|
| 317 |
-
with st.chat_message("assistant"):
|
| 318 |
-
st.markdown(st.session_state.response_fine_tuned)"""
|
| 319 |
with tab4:
|
| 320 |
st.write("Coming soon...")
|
| 321 |
|
|
|
|
| 37 |
# Add a description of the app
|
| 38 |
st.markdown("""This app allows you to generate EAD/XML archival descriptions. See this series of blog posts for explanations :
|
| 39 |
|
| 40 |
+
- [https://iaetbibliotheques.fr/2024/11/comment-apprendre-lead-a-un-llm](https://iaetbibliotheques.fr/2024/11/comment-apprendre-lead-a-un-llm)
|
| 41 |
+
- [https://iaetbibliotheques.fr/2024/11/comment-apprendre-lead-a-un-llm-rag-23](https://iaetbibliotheques.fr/2024/11/comment-apprendre-lead-a-un-llm-rag-23)
|
| 42 |
+
- [https://iaetbibliotheques.fr/2024/12/comment-apprendre-lead-a-un-llm-fine-tuning-33](https://iaetbibliotheques.fr/2024/12/comment-apprendre-lead-a-un-llm-fine-tuning-33)
|
| 43 |
""")
|
| 44 |
st.markdown("---")
|
| 45 |
|
|
|
|
| 48 |
|
| 49 |
def setup_sidebar():
|
| 50 |
groq_models = ["llama3-70b-8192", "llama-3.1-70b-versatile","llama3-8b-8192", "llama-3.1-8b-instant", "mixtral-8x7b-32768","gemma2-9b-it", "gemma-7b-it"]
|
| 51 |
+
selected_groq_models = st.sidebar.radio("Choose a model (used in tabs 1, 2 and 3)", groq_models)
|
| 52 |
return selected_groq_models
|
| 53 |
|
| 54 |
def create_groq_llm(model):
|
|
|
|
| 193 |
|
| 194 |
return chain
|
| 195 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 196 |
def setup_fine_tuned_tab():
|
| 197 |
st.header("Fine-tuned Zephir model")
|
| 198 |
|
|
|
|
| 286 |
st.markdown(st.session_state.response_rag)
|
| 287 |
|
| 288 |
# Process for Tab 4 - Fine-tuned model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 289 |
with tab4:
|
| 290 |
st.write("Coming soon...")
|
| 291 |
|