fix: 🐛 Fix dependencies and blinder link

This commit is contained in:
Louis Gallet 2025-05-13 12:01:21 -04:00
parent f3f6fe24cd
commit d5c962305b
Signed by: lgallet
GPG Key ID: 84D3DF1528A84511
2 changed files with 185 additions and 2 deletions

View File

@ -1 +1,184 @@
{"cells":[{"metadata":{},"cell_type":"markdown","source":"# Notebook entrainement d'une IA à résoudre le XOR\n[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/git/http%3A%2F%2Fgitea.louisgallet.fr%2FCours-particulier%2FNotebooks/master?urlpath=%2Fdoc%2Ftree%2FNotebook_IA_Training.ipynb)\n\nLa table de veritée du XOR est la suivante:\n> x: entrée 1 ; y: entrée 2 ; r: sortie\n\nx = 0 ; y = 0 ; r = 0 \nx = 1 ; y = 0 ; r = 1 \nx = 0 ; y = 1 ; r = 1 \nx = 1 ; y = 1 ; r = 0 "},{"metadata":{"id":"xwVyblbIKSYS","trusted":true},"cell_type":"code","source":"import numpy as np\nimport matplotlib.pyplot as plt","execution_count":null,"outputs":[]},{"metadata":{"id":"wgVHhV0zKeOV","trusted":true},"cell_type":"code","source":"def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef sigmoid_derivative(x):\n return x * (1 - x)","execution_count":null,"outputs":[]},{"metadata":{"id":"Bv03so9jKhZH","trusted":true},"cell_type":"code","source":"# Entrées et sorties du XOR\nX = np.array([[0,0],[0,1],[1,0],[1,1]])\ny = np.array([[0],[1],[1],[0]])","execution_count":null,"outputs":[]},{"metadata":{"id":"-utexs_vKjws","trusted":true},"cell_type":"code","source":"np.random.seed(42)\n\n# 2 neurones en entrée, 3 neurones cachés, 1 neurone en sortie\nw1 = np.random.randn(2, 4)\nb1 = np.zeros((1, 4))\n\nw2 = np.random.randn(4, 1)\nb2 = np.zeros((1, 1))\n\n# Pour enregistrer la perte au fil des époques\nloss_history = []","execution_count":null,"outputs":[]},{"metadata":{"id":"AGlpWtKdKmcZ","trusted":true},"cell_type":"code","source":"learning_rate = 0.1\nepochs = 10000\n\nfor epoch in range(epochs):\n # Forward pass\n z1 = np.dot(X, w1) + b1\n a1 = sigmoid(z1)\n\n z2 = np.dot(a1, w2) + b2\n a2 = sigmoid(z2)\n\n # Calcul de l'erreur\n loss = np.mean((y - a2) ** 2)\n loss_history.append(loss)\n\n # Backpropagation\n d_a2 = (a2 - y)\n d_z2 = d_a2 * sigmoid_derivative(a2)\n\n d_w2 = np.dot(a1.T, d_z2)\n d_b2 = np.sum(d_z2, axis=0, keepdims=True)\n\n d_a1 = np.dot(d_z2, w2.T)\n 
d_z1 = d_a1 * sigmoid_derivative(a1)\n\n d_w1 = np.dot(X.T, d_z1)\n d_b1 = np.sum(d_z1, axis=0, keepdims=True)\n\n # Mise à jour des poids\n w2 -= learning_rate * d_w2\n b2 -= learning_rate * d_b2\n w1 -= learning_rate * d_w1\n b1 -= learning_rate * d_b1","execution_count":null,"outputs":[]},{"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":474},"id":"m60PeuYnKoie","outputId":"d5e217cf-a0b9-49a8-a01b-d9575b7f797f","trusted":true},"cell_type":"code","source":"plt.plot(loss_history)\nplt.title(\"Perte d'erreurs au fil des époques\")\nplt.xlabel(\"Époques\")\nplt.ylabel(\"Taux d'erreur moyen\")\nplt.grid(True)\nplt.show()","execution_count":null,"outputs":[]},{"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"A2sN1_fHKrh8","outputId":"fa906657-126c-4e13-cb6e-c6e7afd9bfd7","trusted":true},"cell_type":"code","source":"print(\"Sortie finale après entraînement :\")\na1 = sigmoid(np.dot(X, w1) + b1)\na2 = sigmoid(np.dot(a1, w2) + b2)\nfor i in range(4):\n print(f\"Entrée : {X[i]} → Prédit : {a2[i][0]:.4f} → Arrondi : {round(a2[i][0])}\")","execution_count":null,"outputs":[]}],"metadata":{"colab":{"provenance":[]},"kernelspec":{"name":"python3","display_name":"Python 3"}},"nbformat":4,"nbformat_minor":2}
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Notebook entraînement d'une IA à résoudre le XOR\n",
"[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/git/http%3A%2F%2Fgitea.louisgallet.fr%2FCours-particulier%2FNotebooks/master?urlpath=%2Fdoc%2Ftree%2FNotebook_IA_Training.ipynb)\n",
"\n",
"La table de vérité du XOR est la suivante:\n",
"> x: entrée 1 ; y: entrée 2 ; r: sortie\n",
"\n",
"x = 0 ; y = 0 ; r = 0 \n",
"x = 1 ; y = 0 ; r = 1 \n",
"x = 0 ; y = 1 ; r = 1 \n",
"x = 1 ; y = 1 ; r = 0 "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xwVyblbIKSYS",
"trusted": true
},
"outputs": [],
"source": [
"%pip install numpy\n",
"%pip install matplotlib\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wgVHhV0zKeOV",
"trusted": true
},
"outputs": [],
"source": [
"def sigmoid(x):\n",
" return 1 / (1 + np.exp(-x))\n",
"\n",
"def sigmoid_derivative(x):\n",
" return x * (1 - x)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Bv03so9jKhZH",
"trusted": true
},
"outputs": [],
"source": [
"# Entrées et sorties du XOR\n",
"X = np.array([[0,0],[0,1],[1,0],[1,1]])\n",
"y = np.array([[0],[1],[1],[0]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "-utexs_vKjws",
"trusted": true
},
"outputs": [],
"source": [
"np.random.seed(42)\n",
"\n",
"# 2 neurones en entrée, 4 neurones cachés, 1 neurone en sortie\n",
"w1 = np.random.randn(2, 4)\n",
"b1 = np.zeros((1, 4))\n",
"\n",
"w2 = np.random.randn(4, 1)\n",
"b2 = np.zeros((1, 1))\n",
"\n",
"# Pour enregistrer la perte au fil des époques\n",
"loss_history = []"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "AGlpWtKdKmcZ",
"trusted": true
},
"outputs": [],
"source": [
"learning_rate = 0.1\n",
"epochs = 10000\n",
"\n",
"for epoch in range(epochs):\n",
" # Forward pass\n",
" z1 = np.dot(X, w1) + b1\n",
" a1 = sigmoid(z1)\n",
"\n",
" z2 = np.dot(a1, w2) + b2\n",
" a2 = sigmoid(z2)\n",
"\n",
" # Calcul de l'erreur\n",
" loss = np.mean((y - a2) ** 2)\n",
" loss_history.append(loss)\n",
"\n",
" # Backpropagation\n",
" d_a2 = (a2 - y)\n",
" d_z2 = d_a2 * sigmoid_derivative(a2)\n",
"\n",
" d_w2 = np.dot(a1.T, d_z2)\n",
" d_b2 = np.sum(d_z2, axis=0, keepdims=True)\n",
"\n",
" d_a1 = np.dot(d_z2, w2.T)\n",
" d_z1 = d_a1 * sigmoid_derivative(a1)\n",
"\n",
" d_w1 = np.dot(X.T, d_z1)\n",
" d_b1 = np.sum(d_z1, axis=0, keepdims=True)\n",
"\n",
" # Mise à jour des poids\n",
" w2 -= learning_rate * d_w2\n",
" b2 -= learning_rate * d_b2\n",
" w1 -= learning_rate * d_w1\n",
" b1 -= learning_rate * d_b1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 474
},
"id": "m60PeuYnKoie",
"outputId": "d5e217cf-a0b9-49a8-a01b-d9575b7f797f",
"trusted": true
},
"outputs": [],
"source": [
"plt.plot(loss_history)\n",
"plt.title(\"Perte d'erreurs au fil des époques\")\n",
"plt.xlabel(\"Époques\")\n",
"plt.ylabel(\"Taux d'erreur moyen\")\n",
"plt.grid(True)\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "A2sN1_fHKrh8",
"outputId": "fa906657-126c-4e13-cb6e-c6e7afd9bfd7",
"trusted": true
},
"outputs": [],
"source": [
"print(\"Sortie finale après entraînement :\")\n",
"a1 = sigmoid(np.dot(X, w1) + b1)\n",
"a2 = sigmoid(np.dot(a1, w2) + b2)\n",
"for i in range(4):\n",
" print(f\"Entrée : {X[i]} → Prédit : {a2[i][0]:.4f} → Arrondi : {round(a2[i][0])}\")"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -1,4 +1,4 @@
# IA Training XOR
[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/git/http%3A%2F%2Fgitea.louisgallet.fr%2FCours-particulier%2FNotebooks/master?urlpath=%2Fdoc%2Ftree%2FNotebook_IA_Training.ipynb)
[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/git/https%3A%2F%2Fgitea.louisgallet.fr%2FCours-particulier%2FNotebooks/master?urlpath=%2Fdoc%2Ftree%2FIA_Training%2FNotebook_IA_Training.ipynb)
This is a simple XOR neural network training script.