diff --git a/your-code/challenge-1.ipynb b/your-code/challenge-1.ipynb
index 2487c5f..03b25d8 100644
--- a/your-code/challenge-1.ipynb
+++ b/your-code/challenge-1.ipynb
@@ -32,13 +32,219 @@
    "1. Normalize the input data."
   ]
  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# your code here\n",
+    "import pandas as pd\n",
+    "tic_tac_toe = pd.read_csv('tic-tac-toe.csv')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "<class 'pandas.core.frame.DataFrame'>\n",
+      "RangeIndex: 958 entries, 0 to 957\n",
+      "Data columns (total 10 columns):\n",
+      " #   Column  Non-Null Count  Dtype \n",
+      "---  ------  --------------  ----- \n",
+      " 0   TL      958 non-null    object\n",
+      " 1   TM      958 non-null    object\n",
+      " 2   TR      958 non-null    object\n",
+      " 3   ML      958 non-null    object\n",
+      " 4   MM      958 non-null    object\n",
+      " 5   MR      958 non-null    object\n",
+      " 6   BL      958 non-null    object\n",
+      " 7   BM      958 non-null    object\n",
+      " 8   BR      958 non-null    object\n",
+      " 9   class   958 non-null    bool  \n",
+      "dtypes: bool(1), object(9)\n",
+      "memory usage: 68.4+ KB\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "TL       0\n",
+       "TM       0\n",
+       "TR       0\n",
+       "ML       0\n",
+       "MM       0\n",
+       "MR       0\n",
+       "BL       0\n",
+       "BM       0\n",
+       "BR       0\n",
+       "class    0\n",
+       "dtype: int64"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "tic_tac_toe.shape\n",
+    "tic_tac_toe.info()\n",
+    "tic_tac_toe.isna().sum()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['TL', 'TM', 'TR', 'ML', 'MM', 'MR', 'BL', 'BM', 'BR', 'class']"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "['TL', 'TM', 'TR', 'ML', 'MM', 'MR', 'BL', 'BM', 'BR']"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "coll = list(tic_tac_toe.columns)\n",
+    "display(coll)\n",
+    "coll.remove('class')\n",
+    "coll"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from sklearn import preprocessing\n",
+    "\n",
+    "le = preprocessing.LabelEncoder()\n",
+    "\n",
+    "# encode each board cell's categorical values as integers, column by column\n",
+    "for col in coll:\n",
+    "    le.fit(tic_tac_toe[col])\n",
+    "    tic_tac_toe[col] = le.transform(tic_tac_toe[col])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Alternative kept for reference: min-max normalization applied after the\n",
+    "# split, fitting the scaler on the training set only:\n",
+    "#from sklearn import preprocessing\n",
+    "#norm = preprocessing.MinMaxScaler()\n",
+    "#X_train = norm.fit_transform(X_train)\n",
+    "#X_test = norm.transform(X_test)\n",
+    "\n",
+    "# In this notebook the features are instead standardized below, before splitting."
+   ]
+  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
   "source": [
-    "# your code here"
+    "from sklearn import preprocessing\n",
+    "\n",
+    "X = tic_tac_toe[['TL', 'TM', 'TR', 'ML', 'MM', 'MR', 'BL', 'BM', 'BR']]\n",
+    "y = tic_tac_toe['class']\n",
+    "\n",
+    "# Standardize the encoded features. Note that the scaler is fitted on the\n",
+    "# full dataset here, before the train/test split; fitting on X_train only\n",
+    "# would avoid leaking test-set statistics (see the sketch below).\n",
+    "std = preprocessing.StandardScaler()\n",
+    "X = std.fit_transform(X)"
+   ]
+  },
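+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "*Editor's note:* the cell above fits `StandardScaler` on the full dataset before the train/test split. A minimal sketch of the leak-free ordering, assuming the same `X` and `y` and the same `random_state=0` split made below (the names `X_tr`, `X_te`, `scaler` are illustrative only):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch only (not executed in this run): split first, then fit the scaler\n",
+    "# on the training portion and reuse it for the test portion.\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "from sklearn.preprocessing import StandardScaler\n",
+    "\n",
+    "X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)\n",
+    "scaler = StandardScaler()\n",
+    "X_tr = scaler.fit_transform(X_tr)  # statistics come from the training set only\n",
+    "X_te = scaler.transform(X_te)      # transform only: no test statistics used in fitting"
+   ]
+  },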
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([[ 1.00322257,  1.08495342,  1.00322257, ...,  1.00322257,\n",
+       "        -0.16731812, -0.28682739],\n",
+       "       [ 1.00322257,  1.08495342,  1.00322257, ..., -0.28682739,\n",
+       "         1.08495342, -0.28682739],\n",
+       "       [ 1.00322257,  1.08495342,  1.00322257, ..., -0.28682739,\n",
+       "        -0.16731812,  1.00322257],\n",
+       "       ...,\n",
+       "       [-0.28682739,  1.08495342, -0.28682739, ...,  1.00322257,\n",
+       "        -0.16731812,  1.00322257],\n",
+       "       [-0.28682739,  1.08495342, -0.28682739, ...,  1.00322257,\n",
+       "        -0.16731812,  1.00322257],\n",
+       "       [-0.28682739, -0.16731812,  1.00322257, ..., -0.28682739,\n",
+       "         1.08495342,  1.00322257]])"
+      ]
+     },
+     "execution_count": 7,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "X"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0       True\n",
+       "1       True\n",
+       "2       True\n",
+       "3       True\n",
+       "4       True\n",
+       "       ...  \n",
+       "953    False\n",
+       "954    False\n",
+       "955    False\n",
+       "956    False\n",
+       "957    False\n",
+       "Name: class, Length: 958, dtype: bool"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "y"
+   ]
+  },
  {
   "cell_type": "markdown",
   "metadata": {},
@@ -58,13 +264,348 @@
    "1. Save your model as `tic-tac-toe.model`."
   ]
  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from sklearn.model_selection import train_test_split\n",
+    "X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([[ 1.00322257, -0.16731812, -0.28682739, ...,  1.00322257,\n",
+       "        -1.41958965,  1.00322257],\n",
+       "       [-1.57687736, -1.41958965, -1.57687736, ...,  1.00322257,\n",
+       "         1.08495342,  1.00322257],\n",
+       "       [-0.28682739,  1.08495342, -1.57687736, ..., -0.28682739,\n",
+       "        -1.41958965, -0.28682739],\n",
+       "       ...,\n",
+       "       [ 1.00322257,  1.08495342, -0.28682739, ..., -0.28682739,\n",
+       "        -0.16731812, -1.57687736],\n",
+       "       [-1.57687736, -0.16731812,  1.00322257, ...,  1.00322257,\n",
+       "         1.08495342,  1.00322257],\n",
+       "       [ 1.00322257, -0.16731812,  1.00322257, ..., -0.28682739,\n",
+       "        -0.16731812, -0.28682739]])"
+      ]
+     },
+     "execution_count": 10,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "X_train"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([[-1.57687736,  1.08495342,  1.00322257, ..., -0.28682739,\n",
+       "        -0.16731812, -0.28682739],\n",
+       "       [-1.57687736,  1.08495342, -0.28682739, ..., -0.28682739,\n",
+       "        -0.16731812, -1.57687736],\n",
+       "       [ 1.00322257,  1.08495342,  1.00322257, ..., -0.28682739,\n",
+       "        -0.16731812,  1.00322257],\n",
+       "       ...,\n",
+       "       [ 1.00322257, -1.41958965, -1.57687736, ...,  1.00322257,\n",
+       "        -0.16731812, -1.57687736],\n",
+       "       [ 1.00322257, -1.41958965, -1.57687736, ...,  1.00322257,\n",
+       "         1.08495342, -0.28682739],\n",
+       "       [ 1.00322257,  1.08495342,  1.00322257, ..., -0.28682739,\n",
+       "        -0.16731812, -1.57687736]])"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "X_test"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
\n", + "835 False\n", + "192 True\n", + "629 False\n", + "559 True\n", + "684 False\n", + "Name: class, Length: 718, dtype: bool" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "y_train" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "from keras.models import Sequential\n", + "from keras.layers import Dense\n", + "from keras.utils import to_categorical" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], "source": [ - "# your code here" + "model = Sequential([ # as far as we know, all networks are sequential\n", + " Dense(64, activation='relu', input_shape=(9,)), # 784= 28*28 dense networks means all neurons in one layer are connected to all neuronsof the next layer\n", + " Dense(64, activation='relu'), # choosing relu instead of sigmoid, this is somewhat common\n", + " Dense(10, activation='softmax'), # the softmax actiavation is the last one to compensate for the high volume additions\n", + "])\n", + "\n", + "model.compile(\n", + " optimizer='adam', #here we could use stochastic gradient descent, but adam is a de facto standard\n", + " loss='sparse_categorical_crossentropy', #this is how we create the original blam to play the blame game\n", + " metrics=['accuracy'],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/30\n", + "48/48 [==============================] - 0s 674us/step - loss: 1.2557 - accuracy: 0.5989\n", + "Epoch 2/30\n", + "48/48 [==============================] - 0s 537us/step - loss: 0.6503 - accuracy: 0.6838\n", + "Epoch 3/30\n", + "48/48 [==============================] - 0s 499us/step - loss: 0.5793 - accuracy: 0.7131\n", + "Epoch 4/30\n", + "48/48 [==============================] - 0s 574us/step - loss: 0.5560 - accuracy: 0.7187\n", + "Epoch 5/30\n", + "48/48 [==============================] - 0s 520us/step - loss: 0.5211 - accuracy: 0.7604\n", + "Epoch 6/30\n", + "48/48 [==============================] - 0s 499us/step - loss: 0.4908 - accuracy: 0.7925\n", + "Epoch 7/30\n", + "48/48 [==============================] - 0s 587us/step - loss: 0.4624 - accuracy: 0.7981\n", + "Epoch 8/30\n", + "48/48 [==============================] - 0s 559us/step - loss: 0.4358 - accuracy: 0.8259\n", + "Epoch 9/30\n", + "48/48 [==============================] - 0s 529us/step - loss: 0.4114 - accuracy: 0.8357\n", + "Epoch 10/30\n", + "48/48 [==============================] - 0s 571us/step - loss: 0.3881 - accuracy: 0.8440\n", + "Epoch 11/30\n", + "48/48 [==============================] - 0s 593us/step - loss: 0.3589 - accuracy: 0.8579\n", + "Epoch 12/30\n", + "48/48 [==============================] - 0s 519us/step - loss: 0.3352 - accuracy: 0.8802\n", + "Epoch 13/30\n", + "48/48 [==============================] - 0s 499us/step - loss: 0.3162 - accuracy: 0.8900\n", + "Epoch 14/30\n", + "48/48 [==============================] - 0s 585us/step - loss: 0.2945 - accuracy: 0.8914\n", + "Epoch 15/30\n", + "48/48 [==============================] - 0s 583us/step - loss: 0.2751 - accuracy: 0.9011\n", + "Epoch 16/30\n", + "48/48 [==============================] - 0s 547us/step - loss: 0.2583 - 
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1/30\n",
+      "48/48 [==============================] - 0s 674us/step - loss: 1.2557 - accuracy: 0.5989\n",
+      "Epoch 2/30\n",
+      "48/48 [==============================] - 0s 537us/step - loss: 0.6503 - accuracy: 0.6838\n",
+      "Epoch 3/30\n",
+      "48/48 [==============================] - 0s 499us/step - loss: 0.5793 - accuracy: 0.7131\n",
+      "Epoch 4/30\n",
+      "48/48 [==============================] - 0s 574us/step - loss: 0.5560 - accuracy: 0.7187\n",
+      "Epoch 5/30\n",
+      "48/48 [==============================] - 0s 520us/step - loss: 0.5211 - accuracy: 0.7604\n",
+      "Epoch 6/30\n",
+      "48/48 [==============================] - 0s 499us/step - loss: 0.4908 - accuracy: 0.7925\n",
+      "Epoch 7/30\n",
+      "48/48 [==============================] - 0s 587us/step - loss: 0.4624 - accuracy: 0.7981\n",
+      "Epoch 8/30\n",
+      "48/48 [==============================] - 0s 559us/step - loss: 0.4358 - accuracy: 0.8259\n",
+      "Epoch 9/30\n",
+      "48/48 [==============================] - 0s 529us/step - loss: 0.4114 - accuracy: 0.8357\n",
+      "Epoch 10/30\n",
+      "48/48 [==============================] - 0s 571us/step - loss: 0.3881 - accuracy: 0.8440\n",
+      "Epoch 11/30\n",
+      "48/48 [==============================] - 0s 593us/step - loss: 0.3589 - accuracy: 0.8579\n",
+      "Epoch 12/30\n",
+      "48/48 [==============================] - 0s 519us/step - loss: 0.3352 - accuracy: 0.8802\n",
+      "Epoch 13/30\n",
+      "48/48 [==============================] - 0s 499us/step - loss: 0.3162 - accuracy: 0.8900\n",
+      "Epoch 14/30\n",
+      "48/48 [==============================] - 0s 585us/step - loss: 0.2945 - accuracy: 0.8914\n",
+      "Epoch 15/30\n",
+      "48/48 [==============================] - 0s 583us/step - loss: 0.2751 - accuracy: 0.9011\n",
+      "Epoch 16/30\n",
+      "48/48 [==============================] - 0s 547us/step - loss: 0.2583 - accuracy: 0.9067\n",
+      "Epoch 17/30\n",
+      "48/48 [==============================] - 0s 562us/step - loss: 0.2476 - accuracy: 0.9067\n",
+      "Epoch 18/30\n",
+      "48/48 [==============================] - 0s 597us/step - loss: 0.2244 - accuracy: 0.9262\n",
+      "Epoch 19/30\n",
+      "48/48 [==============================] - 0s 552us/step - loss: 0.2078 - accuracy: 0.9387\n",
+      "Epoch 20/30\n",
+      "48/48 [==============================] - 0s 519us/step - loss: 0.1977 - accuracy: 0.9359\n",
+      "Epoch 21/30\n",
+      "48/48 [==============================] - 0s 699us/step - loss: 0.1806 - accuracy: 0.9415\n",
+      "Epoch 22/30\n",
+      "48/48 [==============================] - 0s 506us/step - loss: 0.1681 - accuracy: 0.9526\n",
+      "Epoch 23/30\n",
+      "48/48 [==============================] - 0s 559us/step - loss: 0.1556 - accuracy: 0.9624\n",
+      "Epoch 24/30\n",
+      "48/48 [==============================] - 0s 593us/step - loss: 0.1476 - accuracy: 0.9540\n",
+      "Epoch 25/30\n",
+      "48/48 [==============================] - 0s 561us/step - loss: 0.1310 - accuracy: 0.9708\n",
+      "Epoch 26/30\n",
+      "48/48 [==============================] - 0s 603us/step - loss: 0.1219 - accuracy: 0.9777\n",
+      "Epoch 27/30\n",
+      "48/48 [==============================] - 0s 612us/step - loss: 0.1132 - accuracy: 0.9721\n",
+      "Epoch 28/30\n",
+      "48/48 [==============================] - 0s 553us/step - loss: 0.1028 - accuracy: 0.9819\n",
+      "Epoch 29/30\n",
+      "48/48 [==============================] - 0s 545us/step - loss: 0.0993 - accuracy: 0.9819\n",
+      "Epoch 30/30\n",
+      "48/48 [==============================] - 0s 572us/step - loss: 0.0883 - accuracy: 0.9861\n"
+     ]
+    }
+   ],
+   "source": [
+    "history = model.fit(\n",
+    "    X_train,\n",
+    "    y_train,  # integer class labels (the boolean target is cast to 0/1)\n",
+    "    epochs=30,  # pass through the whole training set 30 times\n",
+    "    batch_size=15,  # update the weights after every 15 samples\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "8/8 [==============================] - 0s 627us/step - loss: 0.2517 - accuracy: 0.8917\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[0.2517007291316986, 0.8916666507720947]"
+      ]
+     },
+     "execution_count": 16,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model.evaluate(\n",
+    "    X_test,\n",
+    "    y_test\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "WARNING:tensorflow:From C:\\Users\\xico2\\anaconda3\\envs\\ML python 36\\lib\\site-packages\\tensorflow\\python\\training\\tracking\\tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\n",
+      "Instructions for updating:\n",
+      "This property should not be used in TensorFlow 2.0, as updates are applied automatically.\n",
+      "WARNING:tensorflow:From C:\\Users\\xico2\\anaconda3\\envs\\ML python 36\\lib\\site-packages\\tensorflow\\python\\training\\tracking\\tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\n",
+      "Instructions for updating:\n",
+      "This property should not be used in TensorFlow 2.0, as updates are applied automatically.\n",
+      "INFO:tensorflow:Assets written to: C:\\Users\\xico2\\IronHack\\Labs\\lab-deep-learning\\your-code\\assets\n"
+     ]
+    }
+   ],
+   "source": [
+    "import os\n",
+    "os.getcwd()\n",
+    "\n",
+    "model.save(os.getcwd())  # writes a SavedModel (saved_model.pb + variables/) into the working directory"
+   ]
+  },
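+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "*Editor's note:* the lab asks for the model to be saved as `tic-tac-toe.model`, while the cell above saves a SavedModel into the working directory. A minimal sketch of the requested filename, assuming the same `model` object (Keras treats a path without an `.h5` suffix as a SavedModel directory):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch only (not executed in this run): save under the filename the lab asks for.\n",
+    "model.save('tic-tac-toe.model')\n",
+    "# ...and load it back the same way:\n",
+    "# reloaded = keras.models.load_model('tic-tac-toe.model')"
+   ]
+  },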
"Instructions for updating:\n", + "This property should not be used in TensorFlow 2.0, as updates are applied automatically.\n", + "INFO:tensorflow:Assets written to: C:\\Users\\xico2\\IronHack\\Labs\\lab-deep-learning\\your-code\\assets\n" + ] + } + ], + "source": [ + "import os\n", + "os.getcwd()\n", + "\n", + "model.save(os.getcwd())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "# your code here\n" ] }, { @@ -76,15 +617,79 @@ "Now load your saved model and use it to make predictions on a few random rows in the test dataset. Check if the predictions are correct." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# your code here" + "\n" ] }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0 1 1 1 1]\n" + ] + }, + { + "data": { + "text/plain": [ + "879 0\n", + "496 1\n", + "14 1\n", + "546 1\n", + "55 1\n", + "Name: class, dtype: int32" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# your code here\n", + "import keras\n", + "import numpy as np\n", + "model = keras.models.load_model(os.getcwd())\n", + "\n", + "# Predict on the first 5 rows.\n", + "predictions = model.predict(X_test[:5])\n", + "\n", + "# Print our model's predictions.\n", + "print(np.argmax(predictions, axis=1))\n", + "\n", + "display(y_test[:5].astype(int))\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "markdown", "metadata": {}, @@ -104,13 +709,206 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 85, "metadata": {}, "outputs": [], "source": [ - "# your code here" + "import tensorflow as tf\n", + "model = Sequential([# as far as we know, all networks are sequential\n", + " Dense(64, activation='relu', input_shape=(9,)),\n", + " Dense(64, activation='relu'),# 784= 28*28 dense networks means all neurons in one layer are connected to all neuronsof the next layer\n", + " Dense(64, activation='relu'),# choosing relu instead of sigmoid, this is somewhat common\n", + " Dense(10, activation='softmax'), # the softmax actiavation is the last one to compensate for the high volume additions\n", + "])\n", + "\n", + "opt = tf.keras.optimizers.Adam(learning_rate=0.001)\n", + "\n", + "model.compile(\n", + " optimizer=opt, #here we could use stochastic gradient descent, but adam is a de facto standard\n", + " loss='sparse_categorical_crossentropy', #this is how we create the original blam to play the blame game\n", + " metrics=['accuracy'],\n", + ")" ] }, + { + "cell_type": "code", + "execution_count": 86, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/60\n", + 
"48/48 [==============================] - 0s 768us/step - loss: 1.4611 - accuracy: 0.5251\n", + "Epoch 2/60\n", + "48/48 [==============================] - 0s 619us/step - loss: 0.6321 - accuracy: 0.6407\n", + "Epoch 3/60\n", + "48/48 [==============================] - 0s 589us/step - loss: 0.5845 - accuracy: 0.6950\n", + "Epoch 4/60\n", + "48/48 [==============================] - 0s 582us/step - loss: 0.5316 - accuracy: 0.7535\n", + "Epoch 5/60\n", + "48/48 [==============================] - 0s 723us/step - loss: 0.5083 - accuracy: 0.7604\n", + "Epoch 6/60\n", + "48/48 [==============================] - 0s 647us/step - loss: 0.4402 - accuracy: 0.8343\n", + "Epoch 7/60\n", + "48/48 [==============================] - 0s 570us/step - loss: 0.3927 - accuracy: 0.8454\n", + "Epoch 8/60\n", + "48/48 [==============================] - 0s 619us/step - loss: 0.3569 - accuracy: 0.8607\n", + "Epoch 9/60\n", + "48/48 [==============================] - 0s 593us/step - loss: 0.3001 - accuracy: 0.8928\n", + "Epoch 10/60\n", + "48/48 [==============================] - 0s 568us/step - loss: 0.2644 - accuracy: 0.9081\n", + "Epoch 11/60\n", + "48/48 [==============================] - 0s 586us/step - loss: 0.2254 - accuracy: 0.9206\n", + "Epoch 12/60\n", + "48/48 [==============================] - 0s 540us/step - loss: 0.1959 - accuracy: 0.9276\n", + "Epoch 13/60\n", + "48/48 [==============================] - 0s 594us/step - loss: 0.1658 - accuracy: 0.9485\n", + "Epoch 14/60\n", + "48/48 [==============================] - 0s 599us/step - loss: 0.1352 - accuracy: 0.9596\n", + "Epoch 15/60\n", + "48/48 [==============================] - 0s 591us/step - loss: 0.1102 - accuracy: 0.9749\n", + "Epoch 16/60\n", + "48/48 [==============================] - 0s 594us/step - loss: 0.0942 - accuracy: 0.9805\n", + "Epoch 17/60\n", + "48/48 [==============================] - 0s 548us/step - loss: 0.0692 - accuracy: 0.9875\n", + "Epoch 18/60\n", + "48/48 [==============================] - 0s 622us/step - loss: 0.0547 - accuracy: 0.9944\n", + "Epoch 19/60\n", + "48/48 [==============================] - 0s 590us/step - loss: 0.0449 - accuracy: 0.9930\n", + "Epoch 20/60\n", + "48/48 [==============================] - 0s 580us/step - loss: 0.0337 - accuracy: 0.9986\n", + "Epoch 21/60\n", + "48/48 [==============================] - 0s 569us/step - loss: 0.0276 - accuracy: 1.0000\n", + "Epoch 22/60\n", + "48/48 [==============================] - 0s 583us/step - loss: 0.0213 - accuracy: 1.0000\n", + "Epoch 23/60\n", + "48/48 [==============================] - 0s 734us/step - loss: 0.0192 - accuracy: 1.0000\n", + "Epoch 24/60\n", + "48/48 [==============================] - 0s 556us/step - loss: 0.0150 - accuracy: 1.0000\n", + "Epoch 25/60\n", + "48/48 [==============================] - 0s 676us/step - loss: 0.0119 - accuracy: 1.0000\n", + "Epoch 26/60\n", + "48/48 [==============================] - 0s 674us/step - loss: 0.0096 - accuracy: 1.0000\n", + "Epoch 27/60\n", + "48/48 [==============================] - 0s 591us/step - loss: 0.0081 - accuracy: 1.0000\n", + "Epoch 28/60\n", + "48/48 [==============================] - 0s 663us/step - loss: 0.0071 - accuracy: 1.0000\n", + "Epoch 29/60\n", + "48/48 [==============================] - 0s 596us/step - loss: 0.0062 - accuracy: 1.0000\n", + "Epoch 30/60\n", + "48/48 [==============================] - 0s 586us/step - loss: 0.0054 - accuracy: 1.0000\n", + "Epoch 31/60\n", + "48/48 [==============================] - 0s 588us/step - loss: 0.0047 - accuracy: 1.0000\n", + "Epoch 32/60\n", 
+ "48/48 [==============================] - 0s 631us/step - loss: 0.0042 - accuracy: 1.0000\n", + "Epoch 33/60\n", + "48/48 [==============================] - 0s 650us/step - loss: 0.0037 - accuracy: 1.0000\n", + "Epoch 34/60\n", + "48/48 [==============================] - 0s 664us/step - loss: 0.0033 - accuracy: 1.0000\n", + "Epoch 35/60\n", + "48/48 [==============================] - 0s 592us/step - loss: 0.0030 - accuracy: 1.0000\n", + "Epoch 36/60\n", + "48/48 [==============================] - 0s 568us/step - loss: 0.0026 - accuracy: 1.0000\n", + "Epoch 37/60\n", + "48/48 [==============================] - 0s 760us/step - loss: 0.0024 - accuracy: 1.0000\n", + "Epoch 38/60\n", + "48/48 [==============================] - 0s 605us/step - loss: 0.0021 - accuracy: 1.0000\n", + "Epoch 39/60\n", + "48/48 [==============================] - 0s 594us/step - loss: 0.0020 - accuracy: 1.0000\n", + "Epoch 40/60\n", + "48/48 [==============================] - 0s 707us/step - loss: 0.0018 - accuracy: 1.0000\n", + "Epoch 41/60\n", + "48/48 [==============================] - 0s 576us/step - loss: 0.0016 - accuracy: 1.0000\n", + "Epoch 42/60\n", + "48/48 [==============================] - 0s 627us/step - loss: 0.0015 - accuracy: 1.0000\n", + "Epoch 43/60\n", + "48/48 [==============================] - 0s 560us/step - loss: 0.0014 - accuracy: 1.0000\n", + "Epoch 44/60\n", + "48/48 [==============================] - 0s 582us/step - loss: 0.0013 - accuracy: 1.0000\n", + "Epoch 45/60\n", + "48/48 [==============================] - 0s 596us/step - loss: 0.0012 - accuracy: 1.0000\n", + "Epoch 46/60\n", + "48/48 [==============================] - 0s 565us/step - loss: 0.0011 - accuracy: 1.0000\n", + "Epoch 47/60\n", + "48/48 [==============================] - 0s 685us/step - loss: 0.0010 - accuracy: 1.0000\n", + "Epoch 48/60\n", + "48/48 [==============================] - 0s 584us/step - loss: 9.6424e-04 - accuracy: 1.0000\n", + "Epoch 49/60\n", + "48/48 [==============================] - 0s 740us/step - loss: 8.9443e-04 - accuracy: 1.0000\n", + "Epoch 50/60\n", + "48/48 [==============================] - 0s 598us/step - loss: 8.3055e-04 - accuracy: 1.0000\n", + "Epoch 51/60\n", + "48/48 [==============================] - 0s 647us/step - loss: 7.9388e-04 - accuracy: 1.0000\n", + "Epoch 52/60\n", + "48/48 [==============================] - 0s 603us/step - loss: 7.3674e-04 - accuracy: 1.0000\n", + "Epoch 53/60\n", + "48/48 [==============================] - 0s 644us/step - loss: 6.8690e-04 - accuracy: 1.0000\n", + "Epoch 54/60\n", + "48/48 [==============================] - 0s 727us/step - loss: 6.4070e-04 - accuracy: 1.0000\n", + "Epoch 55/60\n", + "48/48 [==============================] - 0s 617us/step - loss: 6.1734e-04 - accuracy: 1.0000\n", + "Epoch 56/60\n", + "48/48 [==============================] - 0s 621us/step - loss: 5.8071e-04 - accuracy: 1.0000\n", + "Epoch 57/60\n", + "48/48 [==============================] - 0s 626us/step - loss: 5.4303e-04 - accuracy: 1.0000\n", + "Epoch 58/60\n", + "48/48 [==============================] - 0s 645us/step - loss: 5.1229e-04 - accuracy: 1.0000\n", + "Epoch 59/60\n", + "48/48 [==============================] - 0s 584us/step - loss: 4.8246e-04 - accuracy: 1.0000\n", + "Epoch 60/60\n", + "48/48 [==============================] - 0s 617us/step - loss: 4.5750e-04 - accuracy: 1.0000\n" + ] + } + ], + "source": [ + "# your code here\n", + "history = model.fit(\n", + " X_train,\n", + " y_train, # just to make sure the outputs are not considered numeric (because, ya know, 
they are numbers...)\n",
+    "    epochs=60,  # pass through the whole training set 60 times this run\n",
+    "    batch_size=15,  # update the weights after every 15 samples\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 87,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "8/8 [==============================] - 0s 759us/step - loss: 0.1368 - accuracy: 0.9625\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[0.13681451976299286, 0.9624999761581421]"
+      ]
+     },
+     "execution_count": 87,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model.evaluate(\n",
+    "    X_test,\n",
+    "    y_test\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
  {
   "cell_type": "markdown",
   "metadata": {},
@@ -120,12 +918,31 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 84,
   "metadata": {},
   "outputs": [],
   "source": [
-    "# your answer here"
+    "# your answer here\n",
+    "# Adding more hidden layers makes the accuracy increase very quickly in the first epochs.\n",
+    "# Increasing the learning rate from 0.01 to 0.10 makes the model get stuck at nearly 0.65 accuracy, with no improvement over the epochs.\n",
+    "# I also increased the number of epochs.\n",
+    "\n",
+    "# Still, I haven't figured out what the most appropriate network architecture for this problem would be! A mini class would be awesome!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
 ],
 "metadata": {
@@ -144,7 +961,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.7.3"
+   "version": "3.6.12"
  }
 },
 "nbformat": 4,
diff --git a/your-code/neuron.JPG b/your-code/neuron.JPG
new file mode 100644
index 0000000..94c1e7c
Binary files /dev/null and b/your-code/neuron.JPG differ
diff --git a/your-code/neuron2.JPG b/your-code/neuron2.JPG
new file mode 100644
index 0000000..34d912a
Binary files /dev/null and b/your-code/neuron2.JPG differ
diff --git a/your-code/saved_model.pb b/your-code/saved_model.pb
new file mode 100644
index 0000000..ed08a69
Binary files /dev/null and b/your-code/saved_model.pb differ
diff --git a/your-code/tic-tac-toe.model b/your-code/tic-tac-toe.model
new file mode 100644
index 0000000..e69de29
diff --git a/your-code/variables/variables.data-00000-of-00001 b/your-code/variables/variables.data-00000-of-00001
new file mode 100644
index 0000000..d7f445b
Binary files /dev/null and b/your-code/variables/variables.data-00000-of-00001 differ
diff --git a/your-code/variables/variables.index b/your-code/variables/variables.index
new file mode 100644
index 0000000..50c6fd3
Binary files /dev/null and b/your-code/variables/variables.index differ