Upload 8 files
Browse files- demo-1.py +150 -0
- example_save_load-1.py +119 -0
- generative.py +196 -0
- linear.py +302 -0
- neuron.py +140 -0
- publish-to-pypi.yml +71 -0
- pyproject.toml +9 -0
- requirements-1.txt +1 -0
demo-1.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Demonstration of the Sitiai framework.

Runs four small demos in sequence: a generative name model, a linear
regression network, direct use of a SitiNEUR layer, and model
save/load.  Wrapped in functions behind a ``__main__`` guard so that
importing this module no longer executes the whole demo as a side
effect.
"""

import numpy as np
import sitiai


def demo_name_generator():
    """Demo 1: train a character-level name generator and sample from it.

    Returns:
        The trained generative model (reused by the save/load demo).
    """
    print("\n\n📝 DÉMO 1: Générateur de Noms")
    print("-" * 60)

    # Create a generative AI for names
    print("\n1. Création d'une IA générative...")
    name_ai = sitiai.create.ai('generative', mode='name_generator')
    print(f"✓ {name_ai}")

    # Load the training data
    print("\n2. Chargement des données d'entraînement...")
    noms_francais = [
        "Clemylia", "Alexandre", "Sophie", "Marie", "Pierre",
        "Julien", "Camille", "Lucas", "Emma", "Hugo",
        "Léa", "Thomas", "Chloé", "Nathan", "Manon",
        "Baptiste", "Clara", "Antoine", "Juliette", "Maxime"
    ]
    name_ai.load_data(noms_francais)
    print(f"✓ {len(noms_francais)} noms chargés")

    # Train the model
    print("\n3. Entraînement du modèle...")
    name_ai.train(epochs=100)
    print("✓ Entraînement terminé!")

    # Generate new names
    print("\n4. Génération de nouveaux noms:")
    nouveaux_noms = name_ai.generate_batch(n=8, temperature=0.8)
    for i, nom in enumerate(nouveaux_noms, 1):
        print(f" {i}. {nom}")

    return name_ai


def demo_linear_regression():
    """Demo 2: fit a small network to a noisy linear relation.

    Returns:
        (trained model, X_test) — reused by the save/load demo.
    """
    print("\n\n" + "=" * 60)
    print("📊 DÉMO 2: Prédiction Linéaire")
    print("-" * 60)

    # Synthetic regression data
    print("\n1. Création de données synthétiques...")
    np.random.seed(42)
    n_samples = 200

    # Relation: y = 3*x1 + 2*x2 - x3 + 5 + noise
    X_train = np.random.randn(n_samples, 3)
    y_train = 3 * X_train[:, 0] + 2 * X_train[:, 1] - X_train[:, 2] + 5
    y_train += np.random.randn(n_samples) * 0.5  # add noise

    X_test = np.random.randn(50, 3)
    y_test = 3 * X_test[:, 0] + 2 * X_test[:, 1] - X_test[:, 2] + 5
    y_test += np.random.randn(50) * 0.5

    print(f"✓ {n_samples} échantillons d'entraînement, 50 échantillons de test")

    # Create the linear-prediction AI
    print("\n2. Création d'une IA de prédiction linéaire...")
    linear_ai = sitiai.create.ai('linear', input_size=3, output_size=1, hidden_layers=[16, 8])
    print(f"✓ {linear_ai}")

    # Train the model
    print("\n3. Entraînement du modèle (cela peut prendre quelques secondes)...")
    linear_ai.train(X_train, y_train, epochs=200, learning_rate=0.01, verbose=False)
    print("✓ Entraînement terminé!")

    # Evaluate on held-out data
    print("\n4. Évaluation sur les données de test...")
    mse, r2 = linear_ai.evaluate(X_test, y_test)
    print(f" • Erreur quadratique moyenne (MSE): {mse:.4f}")
    print(f" • Score R² (coefficient de détermination): {r2:.4f}")

    # A few sample predictions vs. ground truth
    print("\n5. Exemples de prédictions:")
    for i in range(5):
        x = X_test[i:i+1]
        pred = linear_ai.predict(x)[0, 0]
        real = y_test[i]
        print(f" Prédiction: {pred:.2f} | Réel: {real:.2f} | Erreur: {abs(pred - real):.2f}")

    return linear_ai, X_test


def demo_neuron_layer():
    """Demo 3: use a SitiNEUR layer directly (forward pass only)."""
    print("\n\n" + "=" * 60)
    print("🧠 DÉMO 3: Utilisation directe de SitiNEUR")
    print("-" * 60)

    print("\n1. Création d'une couche de neurones...")
    layer = sitiai.SitiNEUR(input_size=5, output_size=3, activation='relu')
    print(f"✓ {layer}")

    print("\n2. Test de propagation avant...")
    input_data = np.random.randn(2, 5)  # 2 examples, 5 features
    output = layer.forward(input_data)
    print(f" Entrée shape: {input_data.shape}")
    print(f" Sortie shape: {output.shape}")
    print(f" Sortie:\n{output}")


def demo_save_load(name_ai, linear_ai, X_test):
    """Demo 4: persist both models to .npz files and reload one of them.

    Args:
        name_ai: Trained generative model from demo 1.
        linear_ai: Trained linear model from demo 2.
        X_test: Test inputs from demo 2 (used to check the reloaded model).
    """
    print("\n\n" + "=" * 60)
    print("💾 DÉMO 4: Sauvegarde et Chargement de Modèles")
    print("-" * 60)

    print("\n1. Sauvegarde du modèle linéaire...")
    linear_ai.save_weights('demo_linear_model.npz')

    print("\n2. Sauvegarde du générateur de noms...")
    name_ai.save_weights('demo_name_generator.npz')

    print("\n3. Test de chargement...")
    loaded_ai = sitiai.create.ai('linear', input_size=3, output_size=1, hidden_layers=[16, 8])
    loaded_ai.load_weights('demo_linear_model.npz')

    # Sanity check: the reloaded model can still predict
    test_pred = loaded_ai.predict(X_test[0:1])
    print(f" Prédiction avec modèle chargé: {test_pred[0, 0]:.2f}")


def main():
    """Run all four demos in their original order."""
    print("=" * 60)
    print("Bienvenue dans Sitiai!")
    print("Framework Python pour créer et entraîner des IA légères")
    print("=" * 60)

    name_ai = demo_name_generator()
    linear_ai, X_test = demo_linear_regression()
    demo_neuron_layer()
    demo_save_load(name_ai, linear_ai, X_test)

    print("\n\n" + "=" * 60)
    print("✨ Démonstration terminée!")
    print("=" * 60)
    print("\nSitiai vous permet de:")
    print(" • Créer des IA génératives pour générer des noms, textes, etc.")
    print(" • Créer des IA de prédiction linéaire pour la régression")
    print(" • Utiliser des couches de neurones SitiNEUR facilement")
    print(" • Sauvegarder et charger vos modèles (.npz)")
    print(" • Partager vos modèles facilement!")
    print("\nSyntaxe simple inspirée de PyTorch, mais plus accessible!")
    print("=" * 60)


if __name__ == "__main__":
    main()
example_save_load-1.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Example of saving and loading Sitiai models.

Restructured into one function per example behind a ``__main__`` guard
so importing the module no longer runs the examples as a side effect.
"""

import numpy as np
import sitiai


def example_linear_save_load():
    """Example 1: train a linear model, save it, reload it, and verify the
    reloaded model produces the same prediction."""
    print("\n📊 EXEMPLE 1: Modèle de Régression Linéaire")
    print("-" * 60)

    # Toy regression dataset: y = 3*x1 + 2*x2 - x3 + 5 + noise
    np.random.seed(42)
    X_train = np.random.randn(100, 3)
    y_train = 3 * X_train[:, 0] + 2 * X_train[:, 1] - X_train[:, 2] + 5 + np.random.randn(100) * 0.5

    # Create and train the model
    print("\n1. Création et entraînement du modèle...")
    ai = sitiai.create.ai('linear', input_size=3, output_size=1, hidden_layers=[16, 8])
    ai.train(X_train, y_train, epochs=100, verbose=False)
    print(f"✓ Modèle entraîné: {ai}")

    # Reference prediction before saving
    X_test = np.array([[1.0, 2.0, 0.5]])
    pred_before = ai.predict(X_test)
    print(f"\n2. Prédiction avant sauvegarde: {pred_before[0, 0]:.4f}")

    # Save the model
    print("\n3. Sauvegarde du modèle...")
    ai.save_weights('my_linear_model.npz')

    # Fresh model with the same architecture, then load the weights
    print("\n4. Création d'un nouveau modèle et chargement des poids...")
    ai_loaded = sitiai.create.ai('linear', input_size=3, output_size=1, hidden_layers=[16, 8])
    ai_loaded.load_weights('my_linear_model.npz')

    # The reloaded model must predict identically
    pred_after = ai_loaded.predict(X_test)
    print(f"\n5. Prédiction après chargement: {pred_after[0, 0]:.4f}")
    print(f" Différence: {abs(pred_before[0, 0] - pred_after[0, 0]):.10f}")


def example_generative_save_load():
    """Example 2: train a name generator, save it, reload it, sample again."""
    print("\n\n" + "=" * 60)
    print("📝 EXEMPLE 2: Générateur de Noms")
    print("-" * 60)

    # Create and train the generative model
    print("\n1. Création et entraînement du modèle génératif...")
    name_ai = sitiai.create.ai('generative', mode='name_generator')
    noms = ["Alexandre", "Sophie", "Marie", "Pierre", "Julien", "Camille"]
    name_ai.load_data(noms)
    name_ai.train(epochs=100)
    print(f"✓ {name_ai}")

    # Sample before saving
    print("\n2. Génération avant sauvegarde:")
    names_before = name_ai.generate_batch(n=3, temperature=0.8)
    for i, nom in enumerate(names_before, 1):
        print(f" {i}. {nom}")

    # Save the model
    print("\n3. Sauvegarde du modèle...")
    name_ai.save_weights('name_generator.npz')

    # Load into a fresh model
    print("\n4. Chargement dans un nouveau modèle...")
    name_ai_loaded = sitiai.create.ai('generative')
    name_ai_loaded.load_weights('name_generator.npz')

    # Sample after loading
    print("\n5. Génération après chargement:")
    names_after = name_ai_loaded.generate_batch(n=3, temperature=0.8)
    for i, nom in enumerate(names_after, 1):
        print(f" {i}. {nom}")


def example_sharing_notes():
    """Example 3: print guidance on sharing saved .npz models."""
    print("\n\n" + "=" * 60)
    print("🌐 EXEMPLE 3: Partage de Modèle")
    print("-" * 60)

    print("""
Vos modèles peuvent maintenant être partagés facilement!

Exemples d'utilisation:
1. Sauvegarder localement:
ai.save_weights('mon_modele.npz')

2. Partager sur GitHub:
- Commitez le fichier .npz dans votre repo
- Les utilisateurs peuvent télécharger et charger avec load_weights()

3. Publier sur un service cloud:
- Upload le fichier .npz
- Partagez le lien de téléchargement

4. Intégrer dans une application:
- Incluez le fichier .npz dans votre package
- Chargez-le au démarrage de l'application

Le format .npz est compact et compatible avec NumPy!
""")


def main():
    """Run all three examples in their original order."""
    print("=" * 60)
    print("Exemple: Sauvegarde et Chargement de Modèles")
    print("=" * 60)

    example_linear_save_load()
    example_generative_save_load()
    example_sharing_notes()

    print("=" * 60)
    print("✨ Démonstration terminée!")
    print("=" * 60)


if __name__ == "__main__":
    main()
generative.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Module for lightweight generative AI models.
"""

import numpy as np
from typing import List


class GenerativeAI:
    """
    Lightweight generative model for creating names and other short strings.

    The model is a first-order Markov chain over characters: train() counts
    character-to-character transitions in the training data and normalizes
    them into a row-stochastic matrix; generate() then samples a chain from
    the START token until the END token (or max_length) is reached.

    Args:
        mode: Generation mode ('name_generator', 'text_generator').
        **kwargs: Additional parameters (accepted for forward compatibility).

    Example:
        >>> from sitiai import create
        >>> ai = create.ai('generative', mode='name_generator')
        >>> ai.load_data(['Alice', 'Bob', 'Charlie', 'David'])
        >>> ai.train(epochs=100)
        >>> new_name = ai.generate()
    """

    def __init__(self, mode: str = 'name_generator', **kwargs):
        self.mode = mode
        self.data: List[str] = []      # lowercased training strings
        self.char_to_idx = {}          # character -> matrix row/column index
        self.idx_to_char = {}          # inverse of char_to_idx
        self.transition_matrix = None  # row-stochastic transition probabilities
        self.is_trained = False

    def load_data(self, data: List[str]):
        """
        Load the training data and build the character vocabulary.

        Args:
            data: List of training strings (lowercased internally).
        """
        self.data = [d.lower() for d in data]

        # Vocabulary = every character seen, plus the START/END sentinels.
        all_chars = set(''.join(self.data))
        all_chars.add('^')  # START token
        all_chars.add('$')  # END token

        self.char_to_idx = {char: idx for idx, char in enumerate(sorted(all_chars))}
        self.idx_to_char = {idx: char for char, idx in self.char_to_idx.items()}

    def train(self, epochs: int = 100, ngram_size: int = 2):
        """
        Train the generative model by counting character transitions.

        Args:
            epochs: Number of epochs (unused for n-gram counting, kept for
                API compatibility with the other model types).
            ngram_size: N-gram size (currently only bigrams are implemented;
                kept for API compatibility).

        Raises:
            ValueError: If load_data() has not been called first.
        """
        if not self.data:
            raise ValueError("Aucune donnée chargée. Utilisez load_data() d'abord.")

        vocab_size = len(self.char_to_idx)
        self.transition_matrix = np.zeros((vocab_size, vocab_size))

        # Count transitions, wrapping each word in the START (^) and END ($)
        # sentinels so the model also learns which characters begin and end
        # a word.
        for word in self.data:
            extended_word = '^' + word + '$'

            for i in range(len(extended_word) - 1):
                curr_idx = self.char_to_idx[extended_word[i]]
                next_idx = self.char_to_idx[extended_word[i + 1]]
                self.transition_matrix[curr_idx, next_idx] += 1

        # Normalize each row into a probability distribution.
        row_sums = self.transition_matrix.sum(axis=1, keepdims=True)
        row_sums[row_sums == 0] = 1  # avoid division by zero on unseen rows
        self.transition_matrix = self.transition_matrix / row_sums

        self.is_trained = True

    def generate(self, max_length: int = 20, temperature: float = 1.0) -> str:
        """
        Generate a new name or text.

        Args:
            max_length: Maximum length of the generated string.
            temperature: Creativity control (<1 sharpens the distribution,
                >1 flattens it).

        Returns:
            The generated text, capitalized.

        Raises:
            ValueError: If the model has not been trained.
        """
        if not self.is_trained:
            raise ValueError("Le modèle n'est pas entraîné. Utilisez train() d'abord.")

        # Start from the START sentinel (^).
        current_idx = self.char_to_idx['^']
        result = []

        for _ in range(max_length):
            # Probabilities for the next character.
            probs = self.transition_matrix[current_idx].copy()

            # Bug fix: check for an all-zero row BEFORE applying temperature.
            # The original normalized first, which divided by zero and fed
            # NaN probabilities to np.random.choice.
            if probs.sum() == 0:
                break

            # Apply temperature, then renormalize to a valid distribution.
            if temperature != 1.0:
                probs = np.power(probs, 1.0 / temperature)
                probs = probs / probs.sum()

            # Sample the next character.
            next_idx = np.random.choice(len(probs), p=probs)
            next_char = self.idx_to_char[next_idx]

            # The END sentinel ($) terminates the word.
            if next_char == '$':
                break

            result.append(next_char)
            current_idx = next_idx

        return ''.join(result).capitalize()

    def generate_batch(self, n: int = 5, max_length: int = 20, temperature: float = 1.0) -> List[str]:
        """
        Generate several results.

        Args:
            n: Number of generations.
            max_length: Maximum length of each generation.
            temperature: Creativity control.

        Returns:
            List of generated texts.
        """
        return [self.generate(max_length, temperature) for _ in range(n)]

    def save_weights(self, filepath: str):
        """
        Save the generative model to a file.

        Args:
            filepath: Path of the save file (.npz).

        Example:
            >>> ai.save_weights('name_generator.npz')
        """
        if not filepath.endswith('.npz'):
            filepath += '.npz'

        if not self.is_trained:
            print("⚠️ Attention: Le modèle n'est pas entraîné")

        # The vocab mappings are stored as (key, value) pair arrays so they
        # survive the .npz round trip (requires allow_pickle on load).
        save_data = {
            'transition_matrix': self.transition_matrix,
            'char_to_idx': np.array(list(self.char_to_idx.items()), dtype=object),
            'idx_to_char': np.array(list(self.idx_to_char.items()), dtype=object),
            'mode': np.array([self.mode]),
            'is_trained': np.array([self.is_trained])
        }

        np.savez(filepath, **save_data)
        print(f"✓ Modèle génératif sauvegardé dans '{filepath}'")

    def load_weights(self, filepath: str):
        """
        Load a generative model from a file.

        Args:
            filepath: Path of the save file (.npz).

        Example:
            >>> ai = sitiai.create.ai('generative')
            >>> ai.load_weights('name_generator.npz')
        """
        if not filepath.endswith('.npz'):
            filepath += '.npz'

        data = np.load(filepath, allow_pickle=True)

        self.transition_matrix = data['transition_matrix']
        # Bug fix: cast keys/values back to plain str/int on both mappings;
        # the original dict(data['char_to_idx']) left the values as numpy
        # objects after the object-array round trip.
        self.char_to_idx = {str(k): int(v) for k, v in data['char_to_idx']}
        self.idx_to_char = {int(k): str(v) for k, v in data['idx_to_char']}
        self.mode = str(data['mode'][0])
        self.is_trained = bool(data['is_trained'][0])

        print(f"✓ Modèle génératif chargé depuis '{filepath}'")

    def __repr__(self):
        status = "entraîné" if self.is_trained else "non entraîné"
        return f"GenerativeAI(mode='{self.mode}', status='{status}')"
linear.py
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Module pour les IA de prédiction linéaire
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from typing import List, Tuple, Optional
|
| 7 |
+
from .neuron import SitiNEUR
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class LinearAI:
|
| 11 |
+
"""
|
| 12 |
+
IA de prédiction linéaire avec réseau de neurones
|
| 13 |
+
|
| 14 |
+
Args:
|
| 15 |
+
input_size: Nombre de features en entrée
|
| 16 |
+
output_size: Nombre de sorties (1 pour régression simple)
|
| 17 |
+
hidden_layers: Liste des tailles des couches cachées
|
| 18 |
+
|
| 19 |
+
Example:
|
| 20 |
+
>>> from sitiai import create
|
| 21 |
+
>>> ai = create.ai('linear', input_size=3, output_size=1, hidden_layers=[10, 5])
|
| 22 |
+
>>> ai.train(X_train, y_train, epochs=100)
|
| 23 |
+
>>> predictions = ai.predict(X_test)
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
def __init__(self, input_size: int, output_size: int = 1, hidden_layers: Optional[List[int]] = None):
    """Build the layer stack: ReLU hidden layers plus a linear output layer."""
    self.input_size = input_size
    self.output_size = output_size
    # A falsy value (None or []) falls back to a single hidden layer of 10.
    self.hidden_layers = hidden_layers or [10]

    # Chain of layer widths: input -> hidden... -> output.
    sizes = [input_size, *self.hidden_layers, output_size]

    # Hidden layers use ReLU; the final layer stays linear for regression.
    self.layers: List[SitiNEUR] = []
    for pos, (n_in, n_out) in enumerate(zip(sizes, sizes[1:])):
        act = 'linear' if pos == len(sizes) - 2 else 'relu'
        self.layers.append(SitiNEUR(input_size=n_in, output_size=n_out, activation=act))

    self.is_trained = False
    self.loss_history = []

    # Normalization statistics, fitted during train().
    self.x_mean = None
    self.x_std = None
    self.y_mean = None
    self.y_std = None
| 55 |
+
|
| 56 |
+
def _normalize_X(self, X: np.ndarray, fit: bool = False) -> np.ndarray:
|
| 57 |
+
"""Normalise les entrées"""
|
| 58 |
+
if fit:
|
| 59 |
+
self.x_mean = np.mean(X, axis=0)
|
| 60 |
+
self.x_std = np.std(X, axis=0) + 1e-8 # Éviter division par zéro
|
| 61 |
+
|
| 62 |
+
return (X - self.x_mean) / self.x_std
|
| 63 |
+
|
| 64 |
+
def _normalize_y(self, y: np.ndarray, fit: bool = False) -> np.ndarray:
|
| 65 |
+
"""Normalise les sorties"""
|
| 66 |
+
if fit:
|
| 67 |
+
self.y_mean = np.mean(y, axis=0)
|
| 68 |
+
self.y_std = np.std(y, axis=0) + 1e-8 # Éviter division par zéro
|
| 69 |
+
|
| 70 |
+
return (y - self.y_mean) / self.y_std
|
| 71 |
+
|
| 72 |
+
def _denormalize_y(self, y: np.ndarray) -> np.ndarray:
|
| 73 |
+
"""Dénormalise les prédictions"""
|
| 74 |
+
if self.y_mean is not None and self.y_std is not None:
|
| 75 |
+
return y * self.y_std + self.y_mean
|
| 76 |
+
return y
|
| 77 |
+
|
| 78 |
+
def forward(self, x: np.ndarray) -> np.ndarray:
|
| 79 |
+
"""
|
| 80 |
+
Propagation avant à travers tout le réseau
|
| 81 |
+
|
| 82 |
+
Args:
|
| 83 |
+
x: Données d'entrée
|
| 84 |
+
|
| 85 |
+
Returns:
|
| 86 |
+
Prédictions
|
| 87 |
+
"""
|
| 88 |
+
output = x
|
| 89 |
+
for layer in self.layers:
|
| 90 |
+
output = layer.forward(output)
|
| 91 |
+
return output
|
| 92 |
+
|
| 93 |
+
def train(self, X: np.ndarray, y: np.ndarray, epochs: int = 100,
          learning_rate: float = 0.01, batch_size: Optional[int] = None,
          verbose: bool = True):
    """
    Train the model on the data with mini-batch gradient descent on MSE.

    The inputs and targets are standardized first (statistics are fitted
    here and reused by predict()).  Each epoch shuffles the data, then
    for every mini-batch runs a forward pass, accumulates the MSE loss,
    and backpropagates the gradient through the layers in reverse order.

    Args:
        X: Input data (shape: [n_samples, input_size]).
        y: Targets (shape: [n_samples, output_size]); a 1-D array is
            reshaped to a column vector.
        epochs: Number of training epochs.
        learning_rate: Learning rate passed to each layer's backward().
        batch_size: Mini-batch size (None = min(32, n_samples)).
        verbose: Print training progress every 10 epochs.

    Raises:
        ValueError: If X or y contains NaN values.
    """
    # Convert and validate the data
    X = np.array(X, dtype=np.float64)
    y = np.array(y, dtype=np.float64)

    # Ensure y is 2-D (column vector for single-output regression)
    if y.ndim == 1:
        y = y.reshape(-1, 1)

    # Reject NaNs up front rather than letting them poison the weights
    if np.any(np.isnan(X)) or np.any(np.isnan(y)):
        raise ValueError("Les données contiennent des valeurs NaN")

    # Standardize the data; fit=True records the statistics on self
    X_norm = self._normalize_X(X, fit=True)
    y_norm = self._normalize_y(y, fit=True)

    n_samples = X_norm.shape[0]
    batch_size = batch_size or min(32, n_samples)

    self.loss_history = []

    for epoch in range(epochs):
        # Shuffle the data each epoch
        indices = np.random.permutation(n_samples)
        X_shuffled = X_norm[indices]
        y_shuffled = y_norm[indices]

        epoch_loss = 0
        n_batches = 0

        # Mini-batch training
        for i in range(0, n_samples, batch_size):
            batch_X = X_shuffled[i:i + batch_size]
            batch_y = y_shuffled[i:i + batch_size]

            # Forward pass
            predictions = self.forward(batch_X)

            # If training diverged (NaN), abandon the rest of this epoch;
            # note this break only exits the batch loop, so training
            # resumes at the next epoch unless no batch completed at all
            if np.any(np.isnan(predictions)):
                if verbose:
                    print(f"⚠️ NaN détecté à l'époque {epoch + 1}, arrêt de l'entraînement")
                break

            # MSE loss for this batch
            loss = np.mean((predictions - batch_y) ** 2)
            epoch_loss += loss
            n_batches += 1

            # Backward pass: gradient of MSE w.r.t. the predictions
            grad = 2 * (predictions - batch_y) / batch_y.shape[0]

            # Propagate through the layers in reverse order; each layer
            # updates its own weights and returns the upstream gradient
            for layer in reversed(self.layers):
                grad = layer.backward(grad, learning_rate)

        # No batch completed (NaN on the first batch): stop training
        if n_batches == 0:
            break

        avg_loss = epoch_loss / n_batches
        self.loss_history.append(avg_loss)

        if verbose and (epoch + 1) % 10 == 0:
            print(f"Époque {epoch + 1}/{epochs}, Perte: {avg_loss:.6f}")

    self.is_trained = True

    # NOTE(review): loss_history can be empty here (epochs=0 or immediate
    # NaN), in which case this print would raise IndexError — confirm
    if verbose:
        print(f"\n✓ Entraînement terminé! Perte finale: {self.loss_history[-1]:.6f}")
| 175 |
+
|
| 176 |
+
def predict(self, X: np.ndarray) -> np.ndarray:
|
| 177 |
+
"""
|
| 178 |
+
Fait des prédictions sur de nouvelles données
|
| 179 |
+
|
| 180 |
+
Args:
|
| 181 |
+
X: Données d'entrée
|
| 182 |
+
|
| 183 |
+
Returns:
|
| 184 |
+
Prédictions
|
| 185 |
+
"""
|
| 186 |
+
if not self.is_trained:
|
| 187 |
+
print("⚠️ Attention: Le modèle n'est pas entraîné.")
|
| 188 |
+
|
| 189 |
+
X = np.array(X, dtype=np.float64)
|
| 190 |
+
|
| 191 |
+
# Normaliser les entrées si le modèle a été entraîné
|
| 192 |
+
if self.x_mean is not None:
|
| 193 |
+
X_norm = self._normalize_X(X, fit=False)
|
| 194 |
+
predictions_norm = self.forward(X_norm)
|
| 195 |
+
return self._denormalize_y(predictions_norm)
|
| 196 |
+
|
| 197 |
+
return self.forward(X)
|
| 198 |
+
|
| 199 |
+
def evaluate(self, X: np.ndarray, y: np.ndarray) -> Tuple[float, float]:
|
| 200 |
+
"""
|
| 201 |
+
Évalue le modèle sur des données de test
|
| 202 |
+
|
| 203 |
+
Args:
|
| 204 |
+
X: Données d'entrée
|
| 205 |
+
y: Vraies valeurs
|
| 206 |
+
|
| 207 |
+
Returns:
|
| 208 |
+
(mse, r2_score)
|
| 209 |
+
"""
|
| 210 |
+
predictions = self.predict(X)
|
| 211 |
+
y = np.array(y, dtype=np.float32)
|
| 212 |
+
|
| 213 |
+
if y.ndim == 1:
|
| 214 |
+
y = y.reshape(-1, 1)
|
| 215 |
+
|
| 216 |
+
# MSE
|
| 217 |
+
mse = np.mean((predictions - y) ** 2)
|
| 218 |
+
|
| 219 |
+
# R² score
|
| 220 |
+
ss_res = np.sum((y - predictions) ** 2)
|
| 221 |
+
ss_tot = np.sum((y - np.mean(y)) ** 2)
|
| 222 |
+
r2 = 1 - (ss_res / ss_tot) if ss_tot != 0 else 0
|
| 223 |
+
|
| 224 |
+
return mse, r2
|
| 225 |
+
|
def save_weights(self, filepath: str):
    """
    Persist the model's parameters to a .npz archive.

    Args:
        filepath: Destination path; a '.npz' suffix is appended if missing.

    Example:
        >>> ai.save_weights('my_model.npz')
    """
    if not filepath.endswith('.npz'):
        filepath += '.npz'

    payload = {}

    # Per-layer parameters, keyed by layer index.
    for idx, layer in enumerate(self.layers):
        params = layer.get_weights()
        payload[f'layer_{idx}_weights'] = params['weights']
        payload[f'layer_{idx}_bias'] = params['bias']

    # Normalization statistics (only present once the model was fitted).
    if self.x_mean is not None:
        payload['x_mean'] = self.x_mean
        payload['x_std'] = self.x_std
        payload['y_mean'] = self.y_mean
        payload['y_std'] = self.y_std

    # Architecture and training-status metadata, stored as arrays so
    # everything in the archive is a plain numpy array.
    payload['input_size'] = np.array([self.input_size])
    payload['output_size'] = np.array([self.output_size])
    payload['hidden_layers'] = np.array(self.hidden_layers)
    payload['is_trained'] = np.array([self.is_trained])

    np.savez(filepath, **payload)
    print(f"✓ Modèle sauvegardé dans '{filepath}'")
def load_weights(self, filepath: str):
    """
    Load the model's parameters from a .npz archive created by save_weights.

    Args:
        filepath: Path of the saved file; a '.npz' suffix is appended if
            missing.

    Example:
        >>> ai = sitiai.create.ai('linear', input_size=3, output_size=1)
        >>> ai.load_weights('my_model.npz')
    """
    if not filepath.endswith('.npz'):
        filepath += '.npz'

    # All arrays written by save_weights are plain numeric/bool arrays, so
    # pickle support is not needed; allow_pickle=True would let a crafted
    # file execute arbitrary code on load.
    data = np.load(filepath)

    # Restore each layer's weights and bias.
    for i, layer in enumerate(self.layers):
        weights_dict = {
            'weights': data[f'layer_{i}_weights'],
            'bias': data[f'layer_{i}_bias']
        }
        layer.set_weights(weights_dict)

    # Restore the normalization statistics, if they were saved.
    if 'x_mean' in data:
        self.x_mean = data['x_mean']
        self.x_std = data['x_std']
        self.y_mean = data['y_mean']
        self.y_std = data['y_std']

    # Restore the training status flag.
    if 'is_trained' in data:
        self.is_trained = bool(data['is_trained'][0])

    print(f"✓ Modèle chargé depuis '{filepath}'")
def __repr__(self):
    """Concise description: full layer architecture plus training status."""
    if self.is_trained:
        status = "entraîné"
    else:
        status = "non entraîné"
    architecture = [self.input_size] + self.hidden_layers + [self.output_size]
    return f"LinearAI(architecture={architecture}, status='{status}')"
neuron.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Module contenant la couche de neurones SitiNEUR
|
| 3 |
+
Syntaxe inspirée de PyTorch mais simplifiée
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
class SitiNEUR:
    """
    Simplified dense (fully connected) layer for Sitiai.

    Args:
        input_size: Number of input features.
        output_size: Number of output units.
        activation: Activation function ('relu', 'sigmoid', 'tanh', 'linear').

    Example:
        >>> from sitiai import SitiNEUR
        >>> layer = SitiNEUR(input_size=10, output_size=5, activation='relu')
        >>> output = layer.forward(input_data)
    """

    def __init__(self, input_size: int, output_size: int, activation: str = 'relu'):
        self.input_size = input_size
        self.output_size = output_size
        self.activation = activation

        # Weight initialization: He scaling (2/fan_in) for ReLU,
        # Xavier scaling (1/fan_in) for every other activation.
        gain = 2.0 if activation == 'relu' else 1.0
        self.weights = np.random.randn(input_size, output_size) * np.sqrt(gain / input_size)
        self.bias = np.zeros(output_size)

        # Tensors cached by forward() for use in backward().
        self.last_input = None
        self.last_output = None
        self.last_z = None

    def forward(self, x: np.ndarray) -> np.ndarray:
        """
        Forward pass: activation(x @ weights + bias).

        Args:
            x: Input batch, shape [batch_size, input_size].

        Returns:
            Layer output, shape [batch_size, output_size].
        """
        self.last_input = x
        self.last_z = x @ self.weights + self.bias
        self.last_output = self._apply_activation(self.last_z)
        return self.last_output

    def backward(self, grad_output: np.ndarray, learning_rate: float = 0.01) -> np.ndarray:
        """
        Backward pass: update the parameters in place (SGD step) and return
        the gradient with respect to the layer input.

        Args:
            grad_output: Gradient flowing back from the next layer.
            learning_rate: SGD step size.

        Returns:
            Gradient to propagate to the previous layer.
        """
        # Clip the incoming gradient to curb explosion.
        grad_output = np.clip(grad_output, -10, 10)

        # Derivative of the activation, from cached pre-/post-activation.
        if self.activation == 'relu':
            act_grad = (self.last_z > 0).astype(float)
        else:
            act_grad = self._activation_gradient(self.last_output)

        delta = np.clip(grad_output * act_grad, -10, 10)

        # Parameter gradients, clipped once more before the update.
        w_grad = np.clip(self.last_input.T @ delta, -1, 1)
        b_grad = np.clip(delta.sum(axis=0), -1, 1)

        # In-place SGD update.
        self.weights -= learning_rate * w_grad
        self.bias -= learning_rate * b_grad

        # NOTE: computed with the just-updated weights, as in the original.
        return delta @ self.weights.T

    def _apply_activation(self, z: np.ndarray) -> np.ndarray:
        """Apply the configured activation function to z."""
        if self.activation == 'linear':
            return z
        if self.activation == 'relu':
            return np.maximum(0, z)
        if self.activation == 'sigmoid':
            # Clip the argument so exp() never overflows.
            return 1 / (1 + np.exp(-np.clip(z, -500, 500)))
        if self.activation == 'tanh':
            return np.tanh(z)
        raise ValueError(f"Activation inconnue: {self.activation}")

    def _activation_gradient(self, output: np.ndarray) -> np.ndarray:
        """Gradient of the activation, expressed from the activation OUTPUT."""
        if self.activation == 'linear':
            return np.ones_like(output)
        if self.activation == 'relu':
            return (output > 0).astype(float)
        if self.activation == 'sigmoid':
            return output * (1 - output)
        if self.activation == 'tanh':
            return 1 - output ** 2
        raise ValueError(f"Activation inconnue: {self.activation}")

    def get_weights(self):
        """Return copies of the layer's weights and bias."""
        return {'weights': self.weights.copy(), 'bias': self.bias.copy()}

    def set_weights(self, weights_dict):
        """Load weights and bias from a dict produced by get_weights (copied)."""
        self.weights = weights_dict['weights'].copy()
        self.bias = weights_dict['bias'].copy()

    def __repr__(self):
        return f"SitiNEUR(input_size={self.input_size}, output_size={self.output_size}, activation='{self.activation}')"
publish-to-pypi.yml
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# GitHub Actions workflow: build the sitiai package and publish it,
# first to TestPyPI and then to PyPI, whenever a GitHub release is published.
name: Publier Sitiai sur PyPI

on:
  release:
    types: [published]

jobs:
  # Build the sdist/wheel once and hand them to the publish jobs as an artifact.
  build:
    name: Construire le package
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Installer Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.x"

      - name: Installer les dépendances de build
        run: |
          python -m pip install --upgrade pip
          pip install build

      - name: Construire le package
        run: python -m build

      - name: Sauvegarder les fichiers de distribution
        uses: actions/upload-artifact@v4
        with:
          name: python-package-distributions
          path: dist/

  # Rehearsal publication against TestPyPI (uses trusted publishing via OIDC).
  publish-to-testpypi:
    name: Publier sur TestPyPI
    needs: [build]
    runs-on: ubuntu-latest
    environment:
      name: testpypi
      url: https://test.pypi.org/p/sitiai
    permissions:
      id-token: write  # required for PyPI trusted publishing (OIDC)
    steps:
      - name: Télécharger les distributions
        uses: actions/download-artifact@v4
        with:
          name: python-package-distributions
          path: dist/

      - name: Publier sur TestPyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          repository-url: https://test.pypi.org/legacy/

  # Real publication to PyPI, gated on the TestPyPI job succeeding.
  publish-to-pypi:
    name: Publier sur PyPI
    needs: [build, publish-to-testpypi]
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/sitiai
    permissions:
      id-token: write  # required for PyPI trusted publishing (OIDC)
    steps:
      - name: Télécharger les distributions
        uses: actions/download-artifact@v4
        with:
          name: python-package-distributions
          path: dist/

      - name: Publier sur PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
pyproject.toml
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
[project]
name = "sitiai"
version = "0.1.0"
description = "Framework Python pour créer et entraîner des IA légères"
# PEP 621: `authors` must be an array of tables with `name`/`email` keys;
# a bare string entry ("Clemylia") is rejected by build backends.
authors = [
    { name = "Clemylia" }
]
requires-python = ">=3.11"
dependencies = [
    "numpy>=1.24.0"
]
requirements-1.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
numpy>=1.24.0
|