Spaces: Sleeping

ArthurCFR / Claude committed
Commit · e936cff · 1 Parent(s): 85c093e

Automate Assistant feature with ChatGPT API integration
- Added OpenAI API integration with ChatGPT key
- Created call_openai_api() and extract_json_from_response() functions
- Updated Assistant UI with two options: automatic generation vs manual instructions
- Added automatic JSON generation, parsing, and direct injection workflow
- Added session state variables for API processing tracking
- Implemented error handling and fallback to manual process
- Added cleanup of session state when switching modes or returning to home
The Assistant feature now supports:
1. 🤖 "Générer automatiquement" - Full automation via ChatGPT API
2. 📝 "Générer l'instruction (manuel)" - Traditional manual workflow
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
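
For orientation before the diff: a minimal, standalone sketch of the automated path this commit wires up (meta-prompt in, parsed use-case JSON out). The helper name generate_json_from_meta_prompt and the sample prompt are illustrative only, and the key is read from an environment variable here rather than the hardcoded OPENAI_API_KEY constant the commit adds; otherwise it mirrors the request and JSON-extraction logic of call_openai_api() and extract_json_from_response().

import json
import os

import requests


def generate_json_from_meta_prompt(meta_prompt: str, max_tokens: int = 4000) -> dict:
    # Same endpoint, model and sampling settings as call_openai_api() in the diff below.
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",  # assumption: key taken from the environment
            "Content-Type": "application/json",
        },
        json={
            "model": "gpt-4",
            "messages": [{"role": "user", "content": meta_prompt}],
            "max_tokens": max_tokens,
            "temperature": 0.7,
        },
        timeout=60,
    )
    response.raise_for_status()
    content = response.json()["choices"][0]["message"]["content"]

    # Same extraction rule as extract_json_from_response(): prefer a ```json fenced
    # block, otherwise fall back to the outermost {...} span.
    if "```json" in content:
        start = content.find("```json") + 7
        end = content.find("```", start)
        content = content[start:end] if end > start else content[start:]
    else:
        content = content[content.find("{"):content.rfind("}") + 1]
    return json.loads(content.strip())


if __name__ == "__main__":
    use_case = generate_json_from_meta_prompt("Décris un cas d'usage de rédaction d'e-mails professionnels, au format JSON attendu.")
    print(json.dumps(use_case, indent=2, ensure_ascii=False))

In app.py the meta-prompt is built from META_PROMPT_FOR_EXTERNAL_LLM_TEMPLATE and the form values, and the parsed dict is what ends up pre-filled in the inject_manual view.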
app.py CHANGED
@@ -15,6 +15,9 @@ HF_REPO_ID = "AIB-Research/bibliPrompt"
 HF_JSON_FILENAME = "prompt_templates_data_v3.json"
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
+# --- OpenAI API integration ---
+OPENAI_API_KEY = "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"
+
 # --- Save to Hugging Face Space ---
 def save_to_hf_space(json_path, repo_id=HF_REPO_ID, token=HF_TOKEN):
     if not HF_HUB_AVAILABLE:
@@ -74,6 +77,86 @@ def load_from_hf_space(repo_id=HF_REPO_ID, token=HF_TOKEN):
         st.warning(f"Erreur lors du chargement depuis Hugging Face: {e}")
         return None
 
+# --- OpenAI API function ---
+def call_openai_api(prompt_text, max_tokens=4000):
+    """
+    Call OpenAI API to generate JSON response from the prompt template
+    """
+    try:
+        headers = {
+            "Authorization": f"Bearer {OPENAI_API_KEY}",
+            "Content-Type": "application/json"
+        }
+
+        data = {
+            "model": "gpt-4",
+            "messages": [
+                {
+                    "role": "user",
+                    "content": prompt_text
+                }
+            ],
+            "max_tokens": max_tokens,
+            "temperature": 0.7
+        }
+
+        response = requests.post(
+            "https://api.openai.com/v1/chat/completions",
+            headers=headers,
+            json=data,
+            timeout=60
+        )
+
+        if response.status_code == 200:
+            result = response.json()
+            if 'choices' in result and len(result['choices']) > 0:
+                return result['choices'][0]['message']['content'].strip()
+            else:
+                return None
+        else:
+            st.error(f"Erreur API OpenAI: {response.status_code} - {response.text}")
+            return None
+
+    except requests.exceptions.Timeout:
+        st.error("⏱️ Timeout lors de l'appel à l'API OpenAI. Veuillez réessayer.")
+        return None
+    except Exception as e:
+        st.error(f"Erreur lors de l'appel à l'API OpenAI: {e}")
+        return None
+
+def extract_json_from_response(api_response):
+    """
+    Extract JSON content from OpenAI API response
+    """
+    try:
+        # Look for JSON content between ```json and ```
+        if "```json" in api_response:
+            start = api_response.find("```json") + 7
+            end = api_response.find("```", start)
+            if end > start:
+                json_str = api_response[start:end].strip()
+            else:
+                json_str = api_response[start:].strip()
+        else:
+            # Try to find JSON-like content
+            start = api_response.find("{")
+            end = api_response.rfind("}") + 1
+            if start >= 0 and end > start:
+                json_str = api_response[start:end]
+            else:
+                return None
+
+        # Parse and validate JSON
+        json_data = json.loads(json_str)
+        return json_data
+
+    except json.JSONDecodeError as e:
+        st.error(f"Erreur lors du parsing du JSON: {e}")
+        return None
+    except Exception as e:
+        st.error(f"Erreur lors de l'extraction du JSON: {e}")
+        return None
+
 # --- PAGE CONFIGURATION (MUST BE THE FIRST STREAMLIT COMMAND) ---
 # Fixed deployment issue
 st.set_page_config(layout="wide", page_title="🛠️ Le laboratoire des Prompts IA", initial_sidebar_state="collapsed" )
@@ -569,6 +652,10 @@ if 'assistant_mode' not in st.session_state:
     st.session_state.assistant_mode = "creation" # Modes possibles: "creation", "amelioration"
 if 'assistant_existing_prompt_value' not in st.session_state:
     st.session_state.assistant_existing_prompt_value = ""
+if 'assistant_api_processing' not in st.session_state:
+    st.session_state.assistant_api_processing = False
+if 'assistant_generated_json' not in st.session_state:
+    st.session_state.assistant_generated_json = None
 
 # --- Sidebar Navigation with Tabs ---
 st.sidebar.header("Menu Principal")
@@ -1489,6 +1576,10 @@ elif st.session_state.view_mode == "inject_manual":
 elif st.session_state.view_mode == "assistant_creation": # Cette vue gère maintenant les deux modes
     if st.button("⬅️ Retour à l'accueil", key="back_to_accueil_from_assistant_unified"):
         st.session_state.view_mode = "accueil"
+        # Nettoyer les variables de l'assistant
+        st.session_state.assistant_api_processing = False
+        st.session_state.assistant_generated_json = None
+        st.session_state.generated_meta_prompt_for_llm = ""
         st.rerun()
     st.header("✨ Assistant Prompt Système")
 
@@ -1516,13 +1607,16 @@ elif st.session_state.view_mode == "assistant_creation": # Cette vue gère maint
     if selected_mode_key != st.session_state.assistant_mode:
         st.session_state.assistant_mode = selected_mode_key
         st.session_state.generated_meta_prompt_for_llm = "" # Vider le prompt généré car le mode a changé
+        # Nettoyer les variables de traitement automatique
+        st.session_state.assistant_api_processing = False
+        st.session_state.assistant_generated_json = None
         # Optionnel: vider les valeurs des formulaires lors du changement de mode pour éviter confusion
         # st.session_state.assistant_form_values = {var['name']: var['default'] for var in ASSISTANT_FORM_VARIABLES}
         # st.session_state.assistant_existing_prompt_value = ""
         st.rerun()
 
     if st.session_state.assistant_mode == "creation":
-        st.markdown("Décrivez votre besoin pour
+        st.markdown("Décrivez votre besoin pour créer automatiquement un cas d'usage. Choisissez **'Générer automatiquement'** pour une création complète via ChatGPT, ou **'Générer l'instruction (manuel)'** pour obtenir les instructions à coller dans votre LLM.")
         with st.form(key="assistant_creation_form_std"):
             # Initialiser current_form_input_values avec les valeurs de session_state ou les valeurs par défaut
             # pour que les champs du formulaire soient pré-remplis correctement.
@@ -1554,15 +1648,29 @@ elif st.session_state.view_mode == "assistant_creation": # Cette vue gère maint
                         key=field_key,
                         format="%g" # ou un autre format si nécessaire
                     )
-
+            col1, col2 = st.columns(2)
+            with col1:
+                submitted_assistant_form_auto = st.form_submit_button("🤖 Générer automatiquement", use_container_width=True, type="primary")
+            with col2:
+                submitted_assistant_form_manual = st.form_submit_button("📝 Générer l'instruction (manuel)", use_container_width=True)
 
-            if
+            if submitted_assistant_form_auto or submitted_assistant_form_manual:
                 st.session_state.assistant_form_values = temp_form_values.copy() # Sauvegarde les valeurs actuelles du formulaire
                 try:
                     # Vérifier si tous les champs requis pour ce template sont remplis (si nécessaire)
                     populated_meta_prompt = META_PROMPT_FOR_EXTERNAL_LLM_TEMPLATE.format(**st.session_state.assistant_form_values)
                     st.session_state.generated_meta_prompt_for_llm = populated_meta_prompt
-
+
+                    if submitted_assistant_form_auto:
+                        # Automatique: Appeler l'API OpenAI directement
+                        st.session_state.assistant_api_processing = True
+                        st.session_state.assistant_generated_json = None
+                        st.success("🤖 Génération automatique en cours...")
+                        st.rerun()
+                    else:
+                        # Manuel: Afficher l'instruction comme avant
+                        st.success("📝 Instruction de création générée !")
+
                 except KeyError as e:
                     st.error(f"Erreur lors de la construction de l'instruction. Clé de formatage manquante : {e}.")
                     st.session_state.generated_meta_prompt_for_llm = ""
@@ -1601,8 +1709,50 @@ elif st.session_state.view_mode == "assistant_creation": # Cette vue gère maint
             st.error(f"Une erreur inattendue est survenue : {e}")
             st.session_state.generated_meta_prompt_for_llm = ""
 
+    # --- Traitement automatique via API OpenAI ---
+    if st.session_state.assistant_api_processing and st.session_state.generated_meta_prompt_for_llm:
+        with st.spinner("🤖 Génération automatique en cours via ChatGPT..."):
+            api_response = call_openai_api(st.session_state.generated_meta_prompt_for_llm)
+
+            if api_response:
+                extracted_json = extract_json_from_response(api_response)
+                if extracted_json:
+                    st.session_state.assistant_generated_json = extracted_json
+                    st.session_state.assistant_api_processing = False
+                    st.success("✅ Génération automatique terminée !")
+                    st.rerun()
+                else:
+                    st.session_state.assistant_api_processing = False
+                    st.error("❌ Erreur: Impossible d'extraire le JSON de la réponse. Essayez le mode manuel.")
+            else:
+                st.session_state.assistant_api_processing = False
+                st.error("❌ Erreur lors de l'appel à l'API. Essayez le mode manuel.")
+
+    # --- Affichage du JSON généré automatiquement ---
+    if st.session_state.assistant_generated_json:
+        st.subheader("🎉 JSON généré automatiquement :")
+        st.json(st.session_state.assistant_generated_json)
+
+        # Bouton pour injecter directement
+        col1, col2 = st.columns(2)
+        with col1:
+            if st.button("💉 Injecter ce JSON", key="inject_auto_json_btn", use_container_width=True, type="primary"):
+                st.session_state.view_mode = "inject_manual"
+                st.session_state.injection_selected_family = None
+                st.session_state.injection_json_text = json.dumps(st.session_state.assistant_generated_json, indent=2, ensure_ascii=False)
+                st.toast("JSON automatiquement collé dans l'injecteur !", icon="🎉")
+                st.rerun()
+        with col2:
+            if st.button("🔄 Générer à nouveau", key="regenerate_auto_json_btn", use_container_width=True):
+                st.session_state.assistant_api_processing = True
+                st.session_state.assistant_generated_json = None
+                st.rerun()
+
+        st.markdown("---")
+
     # Affichage commun du méta-prompt généré (qu'il vienne de la création ou de l'amélioration)
-
+    # Seulement si pas de JSON automatique généré
+    if st.session_state.generated_meta_prompt_for_llm and not st.session_state.assistant_generated_json:
         st.subheader("📋 Instruction Générée (à coller dans votre LLM) :")
 
         st.code(st.session_state.generated_meta_prompt_for_llm, language='markdown', line_numbers=True)
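
One design choice worth noting in the diff: the submit handler never calls the API itself. It only flips assistant_api_processing and calls st.rerun(), and a block further down the script performs the OpenAI call on the next pass. A stripped-down sketch of that flag-and-rerun pattern, with hypothetical names (demo_form, processing, result) and a placeholder standing in for the real call_openai_api():

import streamlit as st

# One-time session-state initialisation, as with the commit's assistant_* variables.
if "processing" not in st.session_state:
    st.session_state.processing = False
if "result" not in st.session_state:
    st.session_state.result = None

with st.form("demo_form"):
    st.text_input("Sujet", key="topic")
    if st.form_submit_button("🤖 Générer automatiquement"):
        st.session_state.processing = True  # defer the slow call to the next run
        st.session_state.result = None
        st.rerun()

if st.session_state.processing:
    with st.spinner("Génération en cours..."):
        # In app.py this is where call_openai_api() and extract_json_from_response() run.
        st.session_state.result = {"sujet": st.session_state.topic, "statut": "généré"}
        st.session_state.processing = False
        st.rerun()

if st.session_state.result:
    st.json(st.session_state.result)

Deferring the call this way lets the spinner and any error messages render on a fresh run, and the same flag is what the mode-switch and back-to-home cleanups in the diff reset.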