import os
import requests
import time
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry


def generate_mermaid_from_intern(base64_image):
    """
    Generate a Mermaid diagram from a base64-encoded image
    using the InternVL API (Intern AI).

    Includes automatic retries and robust DNS error handling.
    """
    api_key = os.environ.get("INTERNVL_API_KEY", "")
    url = 'https://chat.intern-ai.org.cn/api/v1/chat/completions'
    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {api_key}'
    }

    prompt = """Analyze this mindmap image and convert it into a Mermaid diagram format.

IMPORTANT RULES:
1. Use ONLY 'graph TD' (top-down) direction
2. Format: graph TD
3. Each node: ID[Label]
4. Each edge: Parent --> Child
5. Extract ALL nodes and relationships visible in the image
6. Preserve the hierarchical structure
7. Use simple, clear node IDs (A, B, C, etc.)

Return ONLY the Mermaid code, nothing else."""
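
    # For reference, a successful call is expected to return plain Mermaid
    # code shaped like the following illustrative sample (the actual nodes
    # depend entirely on the input image):
    #
    #   graph TD
    #   A[Main Topic] --> B[Subtopic 1]
    #   A --> C[Subtopic 2]
    #   B --> D[Detail]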

    # Candidate model names, tried in order until one returns a usable diagram.
    models_to_try = [
        'intern-latest',
        'internvl3.5-latest',
        'internvl3-latest',
        'internvl-latest',
        'internvl3.5-241b-a28b',
        'internvl3-78b',
        'intern-s1',
        'intern-s1-mini',
    ]

    # Reusable session with HTTP-level retries (exponential backoff) for
    # rate limits and transient server errors.
    session = requests.Session()
    retry_strategy = Retry(
        total=5,
        backoff_factor=2,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["POST"],
        raise_on_status=False
    )
    adapter = HTTPAdapter(
        max_retries=retry_strategy,
        pool_connections=10,
        pool_maxsize=10
    )
    session.mount("https://", adapter)
    session.mount("http://", adapter)

    last_error = None

    for model_name in models_to_try:
        # Retry each model a few times for DNS/timeout errors before
        # moving on to the next one.
        max_dns_retries = 3

        for dns_attempt in range(max_dns_retries):
            try:
                data = {
                    'model': model_name,
                    'messages': [
                        {
                            'role': 'user',
                            'content': [
                                {
                                    'type': 'text',
                                    'text': prompt
                                },
                                {
                                    'type': 'image_url',
                                    'image_url': {
                                        'url': f"data:image/jpeg;base64,{base64_image}"
                                    }
                                }
                            ]
                        }
                    ],
                    'temperature': 0.1,
                    'top_p': 0.9,
                    'max_tokens': 2000
                }

                response = session.post(
                    url,
                    headers=headers,
                    json=data,
                    timeout=90
                )

                if response.status_code == 200:
                    result = response.json()

                    if 'choices' in result and len(result['choices']) > 0:
                        raw_text = result['choices'][0]['message']['content']

                        # Strip Markdown code fences and Mermaid style
                        # directives from the model output.
                        clean_lines = []
                        for line in raw_text.split('\n'):
                            clean_line = line.replace('```mermaid', '').replace('```', '').strip()
                            if clean_line and not clean_line.startswith("style"):
                                clean_lines.append(clean_line)

                        mermaid_code = "\n".join(clean_lines)

                        # Accept the output only if it looks like a Mermaid graph.
                        if mermaid_code and ('graph' in mermaid_code.lower() or '-->' in mermaid_code):
                            print(f"✅ InternVL model used successfully: {model_name}")
                            session.close()
                            return mermaid_code
                        else:
                            raise ValueError(f"Invalid response for model {model_name}")
                    else:
                        raise ValueError(f"Invalid response format for model {model_name}")

                # Authentication and client errors will not succeed on retry:
                # record them and fall through to the next model.
                elif response.status_code == 401:
                    last_error = f"401 Unauthorized with model {model_name}"
                    break

                elif response.status_code == 400:
                    try:
                        error_data = response.json()
                        last_error = (f"400 Bad Request with model {model_name}: "
                                      f"{error_data.get('message', error_data.get('msg', 'Unknown error'))}")
                    except ValueError:
                        last_error = f"400 Bad Request with model {model_name}"
                    break

                elif response.status_code == 404:
                    last_error = f"404 Not Found with model {model_name}"
                    break

                else:
                    response.raise_for_status()

            except (requests.exceptions.ConnectionError,
                    requests.exceptions.Timeout,
                    ConnectionResetError) as e:
                error_str = str(e).lower()

                # DNS resolution failures: wait with a growing delay, then retry.
                if 'getaddrinfo' in error_str or 'resolve' in error_str or 'name resolution' in error_str:
                    if dns_attempt < max_dns_retries - 1:
                        wait_time = (dns_attempt + 1) * 5
                        print(f"⚠️ DNS error for {model_name}, "
                              f"retry {dns_attempt + 1}/{max_dns_retries} in {wait_time}s...")
                        time.sleep(wait_time)
                        continue
                    else:
                        last_error = (f"Persistent DNS error with model {model_name} "
                                      f"after {max_dns_retries} attempts: {e}")
                        break

                # Timeouts: retry with a shorter delay.
                elif 'timeout' in error_str:
                    if dns_attempt < max_dns_retries - 1:
                        wait_time = (dns_attempt + 1) * 3
                        print(f"⏱️ Timeout for {model_name}, "
                              f"retry {dns_attempt + 1}/{max_dns_retries} in {wait_time}s...")
                        time.sleep(wait_time)
                        continue
                    else:
                        last_error = f"Persistent timeout with model {model_name}: {e}"
                        break

                # Other connection errors: give up on this model immediately.
                else:
                    last_error = f"Connection error with model {model_name}: {e}"
                    break

            except requests.exceptions.RequestException as e:
                last_error = f"Request error with model {model_name}: {e}"
                break

            except Exception as e:
                last_error = f"Unexpected error with model {model_name}: {e}"
                break

            # Reached only when the request completed without success or a
            # handled error (e.g. an unexpected status): try the next model.
            break

    # All models failed: close the session and surface the last error.
    session.close()
    raise RuntimeError(f"All InternVL models failed. Last error: {last_error}")
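

# A minimal usage sketch, assuming INTERNVL_API_KEY is set in the environment.
# "mindmap.jpg" is a hypothetical input file used for illustration.
if __name__ == "__main__":
    import base64

    with open("mindmap.jpg", "rb") as f:
        encoded_image = base64.b64encode(f.read()).decode("utf-8")

    mermaid = generate_mermaid_from_intern(encoded_image)
    print(mermaid)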