From 5b7de792d322b9c5c028f165f0db249462ef9d90 Mon Sep 17 00:00:00 2001 From: Aydin <108932477+Aydinhamedi@users.noreply.github.com> Date: Tue, 19 Mar 2024 21:10:29 +0330 Subject: [PATCH 1/2] modified: BETA_E_Model_T&T.ipynb modified: Data/image_SUB_generator.pkl modified: BETA_E_Model_T&T.ipynb modified: Data/image_SUB_generator.pkl --- BETA_E_Model_T&T.ipynb | 358 ++++------------------------------- Data/image_SUB_generator.pkl | Bin 947 -> 947 bytes 2 files changed, 38 insertions(+), 320 deletions(-) diff --git a/BETA_E_Model_T&T.ipynb b/BETA_E_Model_T&T.ipynb index 1d22fdb..5d61aa3 100644 --- a/BETA_E_Model_T&T.ipynb +++ b/BETA_E_Model_T&T.ipynb @@ -46,7 +46,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 8, "metadata": { "ExecuteTime": { "end_time": "2023-12-28T02:27:47.128539500Z", @@ -83,7 +83,8 @@ "import tensorflow as tf\n", "from keras.models import Model\n", "import matplotlib.pyplot as plt\n", - "from keras.optimizers import SGD\n", + "from keras.optimizers import * # noqa: F403\n", + "from adabelief_tf import AdaBeliefOptimizer # noqa: F401\n", "\n", "# from tensorflow_addons.optimizers import Yogi\n", "from imblearn.over_sampling import SMOTE\n", @@ -200,7 +201,6 @@ "SL_EX = \"_V1\" # _NONOM_V1 | _V1 | _SDNP_V1\n", "LNTS = 0\n", "Debug_OUT = False\n", - "adjust_brightness_Mode = True\n", "RANGE_NOM = True # False for 0 to 255 True for 0 to 1 >> use False for models like ConvNeXtXLarge (⚠️deprecated⚠️)\n", "scale_data_NP_M = False # (⚠️deprecated⚠️)" ] @@ -271,25 +271,19 @@ "text": [ "\u001b[0;33mUsing Def IDG...\u001b[0m\n", "Found 23681 images belonging to 2 classes.\n", - "\u001b[0;33mLoading all images and labels into memory...\u001b[0m\n" - ] - }, - { - "ename": "KeyboardInterrupt", - "evalue": "", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "Cell \u001b[1;32mIn[6], line 223\u001b[0m\n\u001b[0;32m 221\u001b[0m \u001b[38;5;66;03m# Load all images and labels into memory\u001b[39;00m\n\u001b[0;32m 222\u001b[0m print_Color(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mLoading all images and labels into memory...\u001b[39m\u001b[38;5;124m'\u001b[39m, [\u001b[38;5;124m'\u001b[39m\u001b[38;5;124myellow\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[1;32m--> 223\u001b[0m x_train, y_train \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43miter\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtrain_generator_SM\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 224\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m Make_EV_DATA:\n\u001b[0;32m 225\u001b[0m x_val, y_val \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mnext\u001b[39m(\u001b[38;5;28miter\u001b[39m(val_generator))\n", - "File \u001b[1;32mc:\\Users\\aydin\\Desktop\\Pneumonia AI Dev\\venv\\lib\\site-packages\\keras\\preprocessing\\image.py:156\u001b[0m, in \u001b[0;36mIterator.__next__\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 155\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__next__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m--> 156\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnext(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n", - 
"File \u001b[1;32mc:\\Users\\aydin\\Desktop\\Pneumonia AI Dev\\venv\\lib\\site-packages\\keras\\preprocessing\\image.py:168\u001b[0m, in \u001b[0;36mIterator.next\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 165\u001b[0m index_array \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mnext\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mindex_generator)\n\u001b[0;32m 166\u001b[0m \u001b[38;5;66;03m# The transformation of images is not under thread lock\u001b[39;00m\n\u001b[0;32m 167\u001b[0m \u001b[38;5;66;03m# so it can be done in parallel\u001b[39;00m\n\u001b[1;32m--> 168\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_batches_of_transformed_samples\u001b[49m\u001b[43m(\u001b[49m\u001b[43mindex_array\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[1;32mc:\\Users\\aydin\\Desktop\\Pneumonia AI Dev\\venv\\lib\\site-packages\\keras\\preprocessing\\image.py:384\u001b[0m, in \u001b[0;36mBatchFromFilesMixin._get_batches_of_transformed_samples\u001b[1;34m(self, index_array)\u001b[0m\n\u001b[0;32m 382\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mimage_data_generator:\n\u001b[0;32m 383\u001b[0m params \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mimage_data_generator\u001b[38;5;241m.\u001b[39mget_random_transform(x\u001b[38;5;241m.\u001b[39mshape)\n\u001b[1;32m--> 384\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mimage_data_generator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mapply_transform\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mparams\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 385\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mimage_data_generator\u001b[38;5;241m.\u001b[39mstandardize(x)\n\u001b[0;32m 386\u001b[0m batch_x[i] \u001b[38;5;241m=\u001b[39m x\n", - "File \u001b[1;32mc:\\Users\\aydin\\Desktop\\Pneumonia AI Dev\\venv\\lib\\site-packages\\keras\\preprocessing\\image.py:2013\u001b[0m, in \u001b[0;36mImageDataGenerator.apply_transform\u001b[1;34m(self, x, transform_parameters)\u001b[0m\n\u001b[0;32m 2010\u001b[0m img_col_axis \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcol_axis \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m 2011\u001b[0m img_channel_axis \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mchannel_axis \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m-> 2013\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[43mapply_affine_transform\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 2014\u001b[0m \u001b[43m \u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2015\u001b[0m \u001b[43m \u001b[49m\u001b[43mtransform_parameters\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtheta\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2016\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mtransform_parameters\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtx\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2017\u001b[0m \u001b[43m \u001b[49m\u001b[43mtransform_parameters\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mty\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2018\u001b[0m \u001b[43m \u001b[49m\u001b[43mtransform_parameters\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mshear\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2019\u001b[0m \u001b[43m \u001b[49m\u001b[43mtransform_parameters\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mzx\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2020\u001b[0m \u001b[43m \u001b[49m\u001b[43mtransform_parameters\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mzy\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2021\u001b[0m \u001b[43m \u001b[49m\u001b[43mrow_axis\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mimg_row_axis\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2022\u001b[0m \u001b[43m \u001b[49m\u001b[43mcol_axis\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mimg_col_axis\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2023\u001b[0m \u001b[43m \u001b[49m\u001b[43mchannel_axis\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mimg_channel_axis\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2024\u001b[0m \u001b[43m \u001b[49m\u001b[43mfill_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfill_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2025\u001b[0m \u001b[43m \u001b[49m\u001b[43mcval\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcval\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2026\u001b[0m \u001b[43m \u001b[49m\u001b[43morder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minterpolation_order\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2027\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 2029\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m transform_parameters\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mchannel_shift_intensity\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 2030\u001b[0m x \u001b[38;5;241m=\u001b[39m 
apply_channel_shift(\n\u001b[0;32m 2031\u001b[0m x,\n\u001b[0;32m 2032\u001b[0m transform_parameters[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mchannel_shift_intensity\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[0;32m 2033\u001b[0m img_channel_axis,\n\u001b[0;32m 2034\u001b[0m )\n", - "File \u001b[1;32mc:\\Users\\aydin\\Desktop\\Pneumonia AI Dev\\venv\\lib\\site-packages\\keras\\preprocessing\\image.py:2615\u001b[0m, in \u001b[0;36mapply_affine_transform\u001b[1;34m(x, theta, tx, ty, shear, zx, zy, row_axis, col_axis, channel_axis, fill_mode, cval, order)\u001b[0m\n\u001b[0;32m 2612\u001b[0m final_affine_matrix \u001b[38;5;241m=\u001b[39m transform_matrix[:\u001b[38;5;241m2\u001b[39m, :\u001b[38;5;241m2\u001b[39m]\n\u001b[0;32m 2613\u001b[0m final_offset \u001b[38;5;241m=\u001b[39m transform_matrix[:\u001b[38;5;241m2\u001b[39m, \u001b[38;5;241m2\u001b[39m]\n\u001b[1;32m-> 2615\u001b[0m channel_images \u001b[38;5;241m=\u001b[39m [\n\u001b[0;32m 2616\u001b[0m ndimage\u001b[38;5;241m.\u001b[39minterpolation\u001b[38;5;241m.\u001b[39maffine_transform(\n\u001b[0;32m 2617\u001b[0m x_channel,\n\u001b[0;32m 2618\u001b[0m final_affine_matrix,\n\u001b[0;32m 2619\u001b[0m final_offset,\n\u001b[0;32m 2620\u001b[0m order\u001b[38;5;241m=\u001b[39morder,\n\u001b[0;32m 2621\u001b[0m mode\u001b[38;5;241m=\u001b[39mfill_mode,\n\u001b[0;32m 2622\u001b[0m cval\u001b[38;5;241m=\u001b[39mcval,\n\u001b[0;32m 2623\u001b[0m )\n\u001b[0;32m 2624\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m x_channel \u001b[38;5;129;01min\u001b[39;00m x\n\u001b[0;32m 2625\u001b[0m ]\n\u001b[0;32m 2626\u001b[0m x \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mstack(channel_images, axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)\n\u001b[0;32m 2627\u001b[0m x \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mrollaxis(x, \u001b[38;5;241m0\u001b[39m, channel_axis \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m)\n", - "File \u001b[1;32mc:\\Users\\aydin\\Desktop\\Pneumonia AI Dev\\venv\\lib\\site-packages\\keras\\preprocessing\\image.py:2616\u001b[0m, in \u001b[0;36m\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m 2612\u001b[0m final_affine_matrix \u001b[38;5;241m=\u001b[39m transform_matrix[:\u001b[38;5;241m2\u001b[39m, :\u001b[38;5;241m2\u001b[39m]\n\u001b[0;32m 2613\u001b[0m final_offset \u001b[38;5;241m=\u001b[39m transform_matrix[:\u001b[38;5;241m2\u001b[39m, \u001b[38;5;241m2\u001b[39m]\n\u001b[0;32m 2615\u001b[0m channel_images \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m-> 2616\u001b[0m \u001b[43mndimage\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minterpolation\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43maffine_transform\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 2617\u001b[0m \u001b[43m \u001b[49m\u001b[43mx_channel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2618\u001b[0m \u001b[43m \u001b[49m\u001b[43mfinal_affine_matrix\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2619\u001b[0m \u001b[43m \u001b[49m\u001b[43mfinal_offset\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2620\u001b[0m \u001b[43m \u001b[49m\u001b[43morder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43morder\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2621\u001b[0m \u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfill_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2622\u001b[0m \u001b[43m \u001b[49m\u001b[43mcval\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcval\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2623\u001b[0m \u001b[43m 
\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 2624\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m x_channel \u001b[38;5;129;01min\u001b[39;00m x\n\u001b[0;32m 2625\u001b[0m ]\n\u001b[0;32m 2626\u001b[0m x \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mstack(channel_images, axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)\n\u001b[0;32m 2627\u001b[0m x \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mrollaxis(x, \u001b[38;5;241m0\u001b[39m, channel_axis \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m)\n", - "File \u001b[1;32mc:\\Users\\aydin\\Desktop\\Pneumonia AI Dev\\venv\\lib\\site-packages\\scipy\\ndimage\\_interpolation.py:614\u001b[0m, in \u001b[0;36maffine_transform\u001b[1;34m(input, matrix, offset, output_shape, output, order, mode, cval, prefilter)\u001b[0m\n\u001b[0;32m 611\u001b[0m _nd_image\u001b[38;5;241m.\u001b[39mzoom_shift(filtered, matrix, offset\u001b[38;5;241m/\u001b[39mmatrix, output, order,\n\u001b[0;32m 612\u001b[0m mode, cval, npad, \u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[0;32m 613\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 614\u001b[0m \u001b[43m_nd_image\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgeometric_transform\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfiltered\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmatrix\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moffset\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 615\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43morder\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcval\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnpad\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m 616\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[0;32m 617\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m output\n", - "\u001b[1;31mKeyboardInterrupt\u001b[0m: " + "\u001b[0;33mLoading all images and labels into memory...\u001b[0m\n", + "\u001b[0;33mMaking categorical data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mGenerating augmented data \u001b[0m\u001b[0;36m[\u001b[0m\u001b[0;32mADBD: \u001b[0m\u001b[0;31m0\u001b[0m\u001b[0;36m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mNormalizing image data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0mData type: \u001b[0m\u001b[0;32mfloat16\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0mRGB Range: \u001b[0m\u001b[0;34mMin = 0.0\u001b[0m\u001b[0m | \u001b[0m\u001b[0;31mMax = 1.0\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0mLabel ratio: \u001b[0m\u001b[0;31m49.35% PNEUMONIA \u001b[0m\u001b[0;35m| \u001b[0m\u001b[0;32m50.65% NORMAL\u001b[0m\n", + "\u001b[0;33mSetting LNTS...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0mOriginal num_samples: \u001b[0m\u001b[0;32m23681\u001b[0m\n", + "\u001b[0;33mshuffling data...\u001b[0m\n", + "\u001b[0;33mSaving TS...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0mSample dir: \u001b[0m\u001b[0;32mSamples/TSR400_y2024_m03_d19-h13_m51_s36\u001b[0m\n", + "\u001b[0;32mDone.\u001b[0m\n" ] } ], @@ -749,7 +743,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": { "ExecuteTime": { "end_time": 
"2023-12-28T02:31:27.380088800Z", @@ -915,7 +909,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "metadata": { "ExecuteTime": { "end_time": "2023-12-27T17:34:12.077394600Z", @@ -935,14 +929,14 @@ "Freezing 0 layers in the base model...\n", "Percentage of the base model that is frozen: 0.00%\n", "Total model layers: 814\n", - "Model: \"model\"\n", + "Model: \"model_1\"\n", "_____________________________________________________________________________________________________________\n", " Layer (type) Output Shape Param # Connected to Trainable \n", "=============================================================================================================\n", - " input_1 (InputLayer) [(None, 224, 224, 3 0 [] Y \n", + " input_2 (InputLayer) [(None, 224, 224, 3 0 [] Y \n", " )] \n", " \n", - " stem_conv (Conv2D) (None, 112, 112, 64 1728 ['input_1[0][0]'] Y \n", + " stem_conv (Conv2D) (None, 112, 112, 64 1728 ['input_2[0][0]'] Y \n", " ) \n", " \n", " stem_bn (BatchNormalization) (None, 112, 112, 64 256 ['stem_conv[0][0]'] Y \n", @@ -3063,21 +3057,21 @@ " FC_C_Avg-BatchNormalization-L1 (None, 512) 2048 ['FC_C_Dropout-L1-0.1[0][0]'] Y \n", " (BatchNormalization) \n", " \n", - " FC_C_Dense-L2-512 (Dense) (None, 512) 262656 ['FC_C_Avg-BatchNormalization-L Y \n", + " FC_C_Dense-L2-512 (Dense) (None, 256) 131328 ['FC_C_Avg-BatchNormalization-L Y \n", " 1[0][0]'] \n", " \n", - " FC_C_Avg-BatchNormalization-L2 (None, 512) 2048 ['FC_C_Dense-L2-512[0][0]'] Y \n", + " FC_C_Avg-BatchNormalization-L2 (None, 256) 1024 ['FC_C_Dense-L2-512[0][0]'] Y \n", " (BatchNormalization) \n", " \n", - " FC_C_Dense-L3-128 (Dense) (None, 128) 65664 ['FC_C_Avg-BatchNormalization-L Y \n", + " FC_C_Dense-L3-128 (Dense) (None, 128) 32896 ['FC_C_Avg-BatchNormalization-L Y \n", " 2[0][0]'] \n", " \n", " FC_OUTPUT_Dense-2 (Dense) (None, 2) 258 ['FC_C_Dense-L3-128[0][0]'] Y \n", " \n", "=============================================================================================================\n", - "Total params: 65,741,586\n", - "Trainable params: 65,428,818\n", - "Non-trainable params: 312,768\n", + "Total params: 65,576,466\n", + "Trainable params: 65,264,210\n", + "Non-trainable params: 312,256\n", "_____________________________________________________________________________________________________________\n", "done.\n" ] @@ -3129,13 +3123,13 @@ " model_EfficientNetB7_NS = Model(inputs=base_model.input, outputs=predictions)\n", " print(\"Total model layers: \", len(model_EfficientNetB7_NS.layers))\n", " # OPT/compile\n", - " opt = SGD(momentum=0.92, nesterov=False)\n", - " # opt = Nadam()\n", - " # opt = Adamax()\n", - " # opt = RMSprop(momentum=0.9)\n", - " # opt = Adagrad()\n", - " # opt = AdaBeliefOptimizer(epsilon=1e-7, rectify=False, weight_decay=5e-4, print_change_log=False, total_steps=0, amsgrad=False)\n", - " # opt = Yogi()\n", + " # opt = SGD(momentum=0.95, nesterov=False) # noqa: F405\n", + " # opt = Nadam() # noqa: F405\n", + " # opt = Adamax() # noqa: F405\n", + " # opt = RMSprop(momentum=0.9) # noqa: F405\n", + " # opt = Adagrad() # noqa: F405\n", + " opt = AdaBeliefOptimizer(epsilon=1e-7, rectify=False, weight_decay=5e-4, print_change_log=False, total_steps=300700, amsgrad=True) # noqa: F405\n", + " # opt = Yogi() # noqa: F405\n", " model_EfficientNetB7_NS.compile(\n", " optimizer=opt, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]\n", " ) # categorical_crossentropy / binary_crossentropy\n", @@ -9987,14 +9981,12 @@ " - Slow training.\n", " + Achieving higher 
acc.\n", " - Unstable training.\n", - "```\n", - "- TODO:\n", - " - add Pruning" + "```" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 15, "metadata": { "ExecuteTime": { "end_time": "2023-12-28T07:04:23.573633300Z", @@ -10009,7 +10001,7 @@ "Training the model...\n", "\u001b[0;33m\n", "Setup Verbose:\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSetting TensorBoard Log dir to \u001b[0m\u001b[0;32m[logs/fit/y2024_m03_d08-h10_m12_s05]\u001b[0m\u001b[0;36m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSetting TensorBoard Log dir to \u001b[0m\u001b[0;32m[logs/fit/y2024_m03_d19-h14_m15_s27]\u001b[0m\u001b[0;36m...\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mUse_extended_tensorboard \u001b[0m\u001b[0;32m[False]\u001b[0m\u001b[0;36m.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mDebug_OUTPUT_DPS \u001b[0m\u001b[0;32m[True]\u001b[0m\u001b[0;36m.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mUse_OneCycleLr \u001b[0m\u001b[0;32m[False]\u001b[0m\u001b[0;36m.\u001b[0m\n", @@ -10020,281 +10012,7 @@ "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m1\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 0)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;33mPreparing train data...\u001b[0m\n", - "\u001b[0;33m- Fitting ImageDataGenerator...\u001b[0m\n", - "\u001b[0;33m- ImageDataGenerator fit done.\u001b[0m\n", - "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", - "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;31m- Debug DP Sample dir: \u001b[0m\u001b[0;32mSamples/TSR_SUB_400_y2024_m03_d08-h11_m01_s59\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;32mTraining on subset...\u001b[0m\n", - "Epoch 1/6\n", - "256/256 [==============================] - 64s 180ms/step - loss: 6.9601 - accuracy: 0.6775 - val_loss: 3.2041 - val_accuracy: 0.6683 - lr: 0.0100\n", - "Epoch 2/6\n", - "256/256 [==============================] - 43s 166ms/step - loss: 1.9588 - accuracy: 0.7002 - val_loss: 1.2968 - val_accuracy: 0.5353 - lr: 0.0100\n", - "Epoch 3/6\n", - "256/256 [==============================] - 44s 170ms/step - loss: 0.7999 - accuracy: 0.7566 - val_loss: 0.5574 - val_accuracy: 0.7997 - lr: 0.0100\n", - "Epoch 4/6\n", - "256/256 [==============================] - 43s 169ms/step - loss: 0.5651 - accuracy: 0.7925 - val_loss: 0.8158 - val_accuracy: 0.4423 - lr: 0.0100\n", - "Epoch 5/6\n", - "256/256 [==============================] - 43s 168ms/step - loss: 0.4926 - accuracy: 0.8174 - val_loss: 0.5178 - val_accuracy: 0.7516 - lr: 0.0100\n", - "Epoch 6/6\n", - "256/256 [==============================] - 45s 175ms/step - loss: 0.4764 - accuracy: 0.8176 - val_loss: 0.4069 - val_accuracy: 0.8862 - lr: 0.0100\n", - "\u001b[0;32mSubset training done.\u001b[0m\n", - "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-006-0.8862.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.8862\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.4069\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32mImproved model accuracy from 0.000000 to 0.886218. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32mImproved model loss from inf to 0.40685797. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32m(GPU-MEM)\u001b[0m\u001b[0;36m----[free: 13.52GB, used: 10.48GB, total, 24.00GB]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m3325.30 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m283.54 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m3041.76 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0;36m<---------------------------------------|Epoch [1] END|--------------------------------------->\u001b[0m\n", - "\u001b[0m\n", - "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m2\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 6)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;33mPreparing train data...\u001b[0m\n", - "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", - "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;32mTraining on subset...\u001b[0m\n", - "Epoch 7/12\n", - "256/256 [==============================] - 56s 182ms/step - loss: 0.4961 - accuracy: 0.8198 - val_loss: 0.6230 - val_accuracy: 0.6378 - lr: 0.0100\n", - "Epoch 8/12\n", - "256/256 [==============================] - 43s 169ms/step - loss: 0.4284 - accuracy: 0.8552 - val_loss: 1.1890 - val_accuracy: 0.5176 - lr: 0.0100\n", - "Epoch 9/12\n", - "256/256 [==============================] - 45s 174ms/step - loss: 0.3801 - accuracy: 0.8591 - val_loss: 0.4099 - val_accuracy: 0.7917 - lr: 0.0100\n", - "Epoch 10/12\n", - "256/256 [==============================] - 45s 174ms/step - loss: 0.3551 - accuracy: 0.8762 - val_loss: 0.5864 - val_accuracy: 0.6426 - lr: 0.0100\n", - "Epoch 11/12\n", - "256/256 [==============================] - 46s 178ms/step - loss: 0.3286 - accuracy: 0.8896 - val_loss: 0.3477 - val_accuracy: 0.8750 - lr: 0.0100\n", - "Epoch 12/12\n", - "256/256 [==============================] - 46s 177ms/step - loss: 0.2795 - accuracy: 0.9087 - val_loss: 0.3141 - val_accuracy: 0.8910 - lr: 0.0100\n", - "\u001b[0;32mSubset training done.\u001b[0m\n", - "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-012-0.8910.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.8910\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.3141\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32mImproved model accuracy from 0.886218 to 0.891026. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32mImproved model loss from 0.40685797 to 0.31407312. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32m(GPU-MEM)\u001b[0m\u001b[0;36m----[free: 13.52GB, used: 10.48GB, total, 24.00GB]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m930.70 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m281.21 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m649.50 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0;36m<---------------------------------------|Epoch [2] END|--------------------------------------->\u001b[0m\n", - "\u001b[0m\n", - "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m3\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 12)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;33mPreparing train data...\u001b[0m\n", - "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", - "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;32mTraining on subset...\u001b[0m\n", - "Epoch 13/18\n", - "256/256 [==============================] - 57s 186ms/step - loss: 0.3602 - accuracy: 0.8760 - val_loss: 0.2929 - val_accuracy: 0.9022 - lr: 0.0100\n", - "Epoch 14/18\n", - "256/256 [==============================] - 44s 171ms/step - loss: 0.3254 - accuracy: 0.8962 - val_loss: 0.4938 - val_accuracy: 0.7500 - lr: 0.0100\n", - "Epoch 15/18\n", - "256/256 [==============================] - 44s 171ms/step - loss: 0.3586 - accuracy: 0.8748 - val_loss: 0.3384 - val_accuracy: 0.8846 - lr: 0.0100\n", - "Epoch 16/18\n", - "256/256 [==============================] - 44s 171ms/step - loss: 0.2835 - accuracy: 0.9119 - val_loss: 0.3132 - val_accuracy: 0.8750 - lr: 0.0100\n", - "Epoch 17/18\n", - "256/256 [==============================] - 44s 173ms/step - loss: 0.2397 - accuracy: 0.9258 - val_loss: 0.3870 - val_accuracy: 0.9327 - lr: 0.0100\n", - "Epoch 18/18\n", - "256/256 [==============================] - 44s 172ms/step - loss: 0.2420 - accuracy: 0.9236 - val_loss: 0.5111 - val_accuracy: 0.7452 - lr: 0.0100\n", - "\u001b[0;32mSubset training done.\u001b[0m\n", - "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-017-0.9327.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9327\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.3869\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32mImproved model accuracy from 0.891026 to 0.932692. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.3140731156. 
Not saving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32m(GPU-MEM)\u001b[0m\u001b[0;36m----[free: 13.52GB, used: 10.48GB, total, 24.00GB]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m929.68 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m279.01 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m650.67 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0;36m<---------------------------------------|Epoch [3] END|--------------------------------------->\u001b[0m\n", - "\u001b[0m\n", - "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m4\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 18)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;33mPreparing train data...\u001b[0m\n", - "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", - "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;32mTraining on subset...\u001b[0m\n", - "Epoch 19/24\n", - "256/256 [==============================] - 56s 183ms/step - loss: 0.3480 - accuracy: 0.8828 - val_loss: 0.2571 - val_accuracy: 0.9103 - lr: 0.0100\n", - "Epoch 20/24\n", - "256/256 [==============================] - 44s 171ms/step - loss: 0.2920 - accuracy: 0.9060 - val_loss: 0.3757 - val_accuracy: 0.8237 - lr: 0.0100\n", - "Epoch 21/24\n", - "256/256 [==============================] - 44s 172ms/step - loss: 0.2805 - accuracy: 0.9092 - val_loss: 0.3163 - val_accuracy: 0.8990 - lr: 0.0100\n", - "Epoch 22/24\n", - "256/256 [==============================] - 44s 170ms/step - loss: 0.2651 - accuracy: 0.9146 - val_loss: 0.3912 - val_accuracy: 0.8798 - lr: 0.0100\n", - "Epoch 23/24\n", - "256/256 [==============================] - 44s 171ms/step - loss: 0.2149 - accuracy: 0.9341 - val_loss: 0.2746 - val_accuracy: 0.8910 - lr: 0.0100\n", - "Epoch 24/24\n", - "256/256 [==============================] - 44s 171ms/step - loss: 0.1794 - accuracy: 0.9451 - val_loss: 0.5522 - val_accuracy: 0.6779 - lr: 0.0100\n", - "\u001b[0;32mSubset training done.\u001b[0m\n", - "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-019-0.9103.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9103\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.2571\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9326922894. Not saving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32mImproved model loss from 0.31407312 to 0.25707987. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32m(GPU-MEM)\u001b[0m\u001b[0;36m----[free: 13.52GB, used: 10.48GB, total, 24.00GB]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m946.35 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m277.03 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m669.32 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0;36m<---------------------------------------|Epoch [4] END|--------------------------------------->\u001b[0m\n", - "\u001b[0m\n", - "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m5\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 24)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;33mPreparing train data...\u001b[0m\n", - "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", - "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;32mTraining on subset...\u001b[0m\n", - "Epoch 25/30\n", - "256/256 [==============================] - 57s 187ms/step - loss: 0.3599 - accuracy: 0.8831 - val_loss: 0.5153 - val_accuracy: 0.8381 - lr: 0.0100\n", - "Epoch 26/30\n", - "256/256 [==============================] - 45s 175ms/step - loss: 0.3223 - accuracy: 0.8838 - val_loss: 0.3604 - val_accuracy: 0.8862 - lr: 0.0100\n", - "Epoch 27/30\n", - "256/256 [==============================] - 45s 174ms/step - loss: 0.2801 - accuracy: 0.9036 - val_loss: 0.2479 - val_accuracy: 0.9022 - lr: 0.0100\n", - "Epoch 28/30\n", - "256/256 [==============================] - 44s 171ms/step - loss: 0.2989 - accuracy: 0.9060 - val_loss: 0.4376 - val_accuracy: 0.8061 - lr: 0.0100\n", - "Epoch 29/30\n", - "256/256 [==============================] - 45s 176ms/step - loss: 0.2491 - accuracy: 0.9229 - val_loss: 0.2752 - val_accuracy: 0.9119 - lr: 0.0100\n", - "Epoch 30/30\n", - "256/256 [==============================] - 44s 172ms/step - loss: 0.1930 - accuracy: 0.9434 - val_loss: 0.3148 - val_accuracy: 0.8654 - lr: 0.0100\n", - "\u001b[0;32mSubset training done.\u001b[0m\n", - "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-029-0.9119.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9135\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.2751\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9326922894. Not saving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.2570798695. 
Not saving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32m(GPU-MEM)\u001b[0m\u001b[0;36m----[free: 13.52GB, used: 10.48GB, total, 24.00GB]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m903.74 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m281.25 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m622.49 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0;36m<---------------------------------------|Epoch [5] END|--------------------------------------->\u001b[0m\n", - "\u001b[0m\n", - "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m6\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 30)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;33mPreparing train data...\u001b[0m\n", - "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", - "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;32mTraining on subset...\u001b[0m\n", - "Epoch 31/36\n", - "256/256 [==============================] - 55s 180ms/step - loss: 0.3206 - accuracy: 0.8911 - val_loss: 0.3565 - val_accuracy: 0.8942 - lr: 0.0100\n", - "Epoch 32/36\n", - "256/256 [==============================] - 44s 171ms/step - loss: 0.2563 - accuracy: 0.9172 - val_loss: 0.3963 - val_accuracy: 0.7756 - lr: 0.0100\n", - "Epoch 33/36\n", - "256/256 [==============================] - 44s 170ms/step - loss: 0.2503 - accuracy: 0.9199 - val_loss: 0.2317 - val_accuracy: 0.9311 - lr: 0.0100\n", - "Epoch 34/36\n", - "256/256 [==============================] - 43s 168ms/step - loss: 0.2374 - accuracy: 0.9277 - val_loss: 0.3139 - val_accuracy: 0.8702 - lr: 0.0100\n", - "Epoch 35/36\n", - "256/256 [==============================] - 44s 169ms/step - loss: 0.1840 - accuracy: 0.9463 - val_loss: 0.2666 - val_accuracy: 0.9022 - lr: 0.0100\n", - "Epoch 36/36\n", - "256/256 [==============================] - 43s 166ms/step - loss: 0.2040 - accuracy: 0.9419 - val_loss: 0.2287 - val_accuracy: 0.9151 - lr: 0.0100\n", - "\u001b[0;32mSubset training done.\u001b[0m\n", - "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-033-0.9311.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9311\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.2317\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9326922894. Not saving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32mImproved model loss from 0.25707987 to 0.23173817. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32m(GPU-MEM)\u001b[0m\u001b[0;36m----[free: 13.52GB, used: 10.48GB, total, 24.00GB]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m907.47 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m273.48 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m634.00 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0;36m<---------------------------------------|Epoch [6] END|--------------------------------------->\u001b[0m\n", - "\u001b[0m\n", - "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m7\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 36)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;33mPreparing train data...\u001b[0m\n", - "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", - "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;32mTraining on subset...\u001b[0m\n", - "Epoch 37/42\n", - "256/256 [==============================] - 56s 183ms/step - loss: 0.3124 - accuracy: 0.9009 - val_loss: 0.3835 - val_accuracy: 0.8029 - lr: 0.0100\n", - "Epoch 38/42\n", - "256/256 [==============================] - 46s 177ms/step - loss: 0.2644 - accuracy: 0.9167 - val_loss: 0.3267 - val_accuracy: 0.8670 - lr: 0.0100\n", - "Epoch 39/42\n", - "256/256 [==============================] - 45s 176ms/step - loss: 0.2453 - accuracy: 0.9209 - val_loss: 0.2519 - val_accuracy: 0.9295 - lr: 0.0100\n", - "Epoch 40/42\n", - "256/256 [==============================] - 45s 173ms/step - loss: 0.2093 - accuracy: 0.9380 - val_loss: 0.3897 - val_accuracy: 0.8478 - lr: 0.0100\n", - "Epoch 41/42\n", - "256/256 [==============================] - 45s 173ms/step - loss: 0.2331 - accuracy: 0.9355 - val_loss: 0.2271 - val_accuracy: 0.9151 - lr: 0.0100\n", - "Epoch 42/42\n", - "256/256 [==============================] - 45s 174ms/step - loss: 0.1768 - accuracy: 0.9517 - val_loss: 0.4474 - val_accuracy: 0.9247 - lr: 0.0100\n", - "\u001b[0;32mSubset training done.\u001b[0m\n", - "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-039-0.9295.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9295\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.2519\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9326922894. Not saving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.2317381650. 
Not saving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32m(GPU-MEM)\u001b[0m\u001b[0;36m----[free: 13.52GB, used: 10.48GB, total, 24.00GB]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m909.53 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m281.74 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m627.79 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0;36m<---------------------------------------|Epoch [7] END|--------------------------------------->\u001b[0m\n", - "\u001b[0m\n", - "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m8\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 42)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;33mPreparing train data...\u001b[0m\n", - "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", - "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;32mTraining on subset...\u001b[0m\n", - "Epoch 43/48\n", - "256/256 [==============================] - 55s 180ms/step - loss: 0.2875 - accuracy: 0.9062 - val_loss: 0.3568 - val_accuracy: 0.8766 - lr: 0.0100\n", - "Epoch 44/48\n", - "256/256 [==============================] - 44s 171ms/step - loss: 0.2241 - accuracy: 0.9302 - val_loss: 0.2934 - val_accuracy: 0.8798 - lr: 0.0100\n", - "Epoch 45/48\n", - "256/256 [==============================] - 43s 166ms/step - loss: 0.2054 - accuracy: 0.9351 - val_loss: 0.3171 - val_accuracy: 0.8654 - lr: 0.0100\n", - "Epoch 46/48\n", - "256/256 [==============================] - 43s 166ms/step - loss: 0.1843 - accuracy: 0.9485 - val_loss: 0.4612 - val_accuracy: 0.8734 - lr: 0.0100\n", - "Epoch 47/48\n", - "256/256 [==============================] - 43s 169ms/step - loss: 0.1493 - accuracy: 0.9575 - val_loss: 0.3331 - val_accuracy: 0.9006 - lr: 0.0100\n", - "Epoch 48/48\n", - "256/256 [==============================] - 44s 172ms/step - loss: 0.1619 - accuracy: 0.9573 - val_loss: 0.2778 - val_accuracy: 0.9135 - lr: 0.0100\n", - "\u001b[0;32mSubset training done.\u001b[0m\n", - "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-048-0.9135.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9135\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.2778\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9326922894. Not saving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.2317381650. 
Not saving model.\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;32m(GPU-MEM)\u001b[0m\u001b[0;36m----[free: 13.52GB, used: 10.48GB, total, 24.00GB]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m905.73 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m273.59 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m632.13 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0;36m<---------------------------------------|Epoch [8] END|--------------------------------------->\u001b[0m\n", - "\u001b[0m\n", - "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m9\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 48)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;33mPreparing train data...\u001b[0m\n", - "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", - "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", - "\u001b[0;32mTraining on subset...\u001b[0m\n", - "Epoch 49/54\n", - "125/256 [=============>................] - ETA: 18s - loss: 0.3019 - accuracy: 0.9065\n", - "KeyboardInterrupt. (Training stopped)\n", - "Training done.\n", - "\n" + "\u001b[0;33m- Fitting ImageDataGenerator...\u001b[0m\n" ] } ], diff --git a/Data/image_SUB_generator.pkl b/Data/image_SUB_generator.pkl index dc7db72d322aa30d5eb875d89464db77061ec363..d51766758515bbd803de4bcc6977503e2ba45854 100644 GIT binary patch delta 39 kcmdnYzL|Z4E;Dz+vsn&MJlTR-liT&dZ$~JeoWtx608fz-tpET3 delta 39 kcmdnYzL|Z4E;IMrcXJ$|c(MhvCbyybFGnbzoWtx60A|+_2mk;8 From 9dff141f7e9154e2571cb46e99986eb7d4db7859 Mon Sep 17 00:00:00 2001 From: Aydin <108932477+Aydinhamedi@users.noreply.github.com> Date: Tue, 19 Mar 2024 21:10:41 +0330 Subject: [PATCH 2/2] modified: BETA_E_Model_T&T.ipynb modified: Data/image_SUB_generator.pkl --- BETA_E_Model_T&T.ipynb | 50 +++++++++++++++++------------------ Data/image_SUB_generator.pkl | Bin 947 -> 947 bytes 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/BETA_E_Model_T&T.ipynb b/BETA_E_Model_T&T.ipynb index 5d61aa3..dc0c04c 100644 --- a/BETA_E_Model_T&T.ipynb +++ b/BETA_E_Model_T&T.ipynb @@ -46,7 +46,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 2, "metadata": { "ExecuteTime": { "end_time": "2023-12-28T02:27:47.128539500Z", @@ -282,7 +282,7 @@ "\u001b[0m\u001b[0m\u001b[0mOriginal num_samples: \u001b[0m\u001b[0;32m23681\u001b[0m\n", "\u001b[0;33mshuffling data...\u001b[0m\n", "\u001b[0;33mSaving TS...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0mSample dir: \u001b[0m\u001b[0;32mSamples/TSR400_y2024_m03_d19-h13_m51_s36\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0mSample dir: \u001b[0m\u001b[0;32mSamples/TSR400_y2024_m03_d19-h14_m25_s08\u001b[0m\n", "\u001b[0;32mDone.\u001b[0m\n" ] } @@ -909,7 +909,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 10, "metadata": { "ExecuteTime": { "end_time": "2023-12-27T17:34:12.077394600Z", @@ -929,14 +929,14 @@ "Freezing 0 layers in the base model...\n", "Percentage of the base model that is frozen: 0.00%\n", "Total model layers: 814\n", - "Model: \"model_1\"\n", + "Model: \"model\"\n", 
"_____________________________________________________________________________________________________________\n", " Layer (type) Output Shape Param # Connected to Trainable \n", "=============================================================================================================\n", - " input_2 (InputLayer) [(None, 224, 224, 3 0 [] Y \n", + " input_1 (InputLayer) [(None, 224, 224, 3 0 [] Y \n", " )] \n", " \n", - " stem_conv (Conv2D) (None, 112, 112, 64 1728 ['input_2[0][0]'] Y \n", + " stem_conv (Conv2D) (None, 112, 112, 64 1728 ['input_1[0][0]'] Y \n", " ) \n", " \n", " stem_bn (BatchNormalization) (None, 112, 112, 64 256 ['stem_conv[0][0]'] Y \n", @@ -3105,13 +3105,13 @@ " # GlobalAveragePooling2D\n", " base_model_FT = GlobalAveragePooling2D(name=\"FC_INPUT_Avg-Pooling\")(base_model.output)\n", " # Dense\n", - " Dense_L1 = Dense(512, activation=\"relu\", kernel_regularizer=l2(0.0086), name=\"FC_C_Dense-L1-512\")(base_model_FT)\n", + " Dense_L1 = Dense(512, activation=\"relu\", kernel_regularizer=l2(0.0066), name=\"FC_C_Dense-L1-512\")(base_model_FT)\n", " # Dropout\n", " Dropout_L1 = Dropout(0.125, name=\"FC_C_Dropout-L1-0.1\")(Dense_L1)\n", " # BatchNormalization\n", " BatchNorm_L2 = BatchNormalization(name=\"FC_C_Avg-BatchNormalization-L1\")(Dropout_L1)\n", " # Dense\n", - " Dense_L2 = Dense(256, activation=\"relu\", kernel_regularizer=l2(0.0065), name=\"FC_C_Dense-L2-512\")(BatchNorm_L2)\n", + " Dense_L2 = Dense(256, activation=\"relu\", kernel_regularizer=l2(0.0045), name=\"FC_C_Dense-L2-512\")(BatchNorm_L2)\n", " # BatchNormalization\n", " BatchNorm_L3 = BatchNormalization(name=\"FC_C_Avg-BatchNormalization-L2\")(Dense_L2)\n", " # Dense\n", @@ -3128,7 +3128,7 @@ " # opt = Adamax() # noqa: F405\n", " # opt = RMSprop(momentum=0.9) # noqa: F405\n", " # opt = Adagrad() # noqa: F405\n", - " opt = AdaBeliefOptimizer(epsilon=1e-7, rectify=False, weight_decay=5e-4, print_change_log=False, total_steps=300700, amsgrad=True) # noqa: F405\n", + " opt = AdaBeliefOptimizer(epsilon=1e-7, rectify=False, weight_decay=5e-4, print_change_log=False, amsgrad=False) # noqa: F405\n", " # opt = Yogi() # noqa: F405\n", " model_EfficientNetB7_NS.compile(\n", " optimizer=opt, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]\n", @@ -9986,7 +9986,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 11, "metadata": { "ExecuteTime": { "end_time": "2023-12-28T07:04:23.573633300Z", @@ -10001,7 +10001,7 @@ "Training the model...\n", "\u001b[0;33m\n", "Setup Verbose:\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSetting TensorBoard Log dir to \u001b[0m\u001b[0;32m[logs/fit/y2024_m03_d19-h14_m15_s27]\u001b[0m\u001b[0;36m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSetting TensorBoard Log dir to \u001b[0m\u001b[0;32m[logs/fit/y2024_m03_d19-h21_m07_s37]\u001b[0m\u001b[0;36m...\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mUse_extended_tensorboard \u001b[0m\u001b[0;32m[False]\u001b[0m\u001b[0;36m.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mDebug_OUTPUT_DPS \u001b[0m\u001b[0;32m[True]\u001b[0m\u001b[0;36m.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mUse_OneCycleLr \u001b[0m\u001b[0;32m[False]\u001b[0m\u001b[0;36m.\u001b[0m\n", @@ -10009,7 +10009,7 @@ "\u001b[0m\u001b[0m\u001b[0;36mOneCycleLr_UFTS \u001b[0m\u001b[0;32m[False]\u001b[0m\u001b[0;36m.\u001b[0m\n", "\u001b[0;33mSetup Verbose END.\u001b[0m\n", "\u001b[0m\n", - "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m1\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 0)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning 
the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m1\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m489 (TSEC: 0)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Stage 1]\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;33mPreparing train data...\u001b[0m\n", "\u001b[0;33m- Fitting ImageDataGenerator...\u001b[0m\n" ] } ], @@ -10028,15 +10028,15 @@ "max_EST_epoch = 164 # max_EST_epoch: Maximum number of epochs to train the model; an estimate used for the One Cycle UFTS.\n", "subset_epoch = 6 # subset_epoch: Number of epochs to train each subset.\n", "subset_epoch_FT = 6 # subset_epoch_FT: subset_epoch after pre-training epochs.\n", - "PL_epoch = 26 # PL_epoch: Number of pre-training epochs. Use >=24 for large models or 0/1 for fine-tuning only.\n", + "Stage1_epoch = 26 # Stage1_epoch: Number of pre-training epochs. Use >=24 for large models or 0/1 for fine-tuning only.\n", "subset_size = 4096 # subset_size: Size of each training subset. Common values: 512, 1024, 2048, 3200, 4096, 5846, 8192.\n", "Conf_batch_size_REV2 = 16 # Conf_batch_size_REV2: Batch size.\n", "RES_Train = False # RES_Train: Resume training if True.\n", - "MAX_LR = 0.01 # MAX_LR: Maximum learning rate.\n", - "DEC_LR = 0.00005 # DEC_LR: Learning rate decay.\n", - "MIN_LR = 0.0005 # MIN_LR: Minimum learning rate.\n", - "RES_LR = 0.006 # RES_LR: Resuming learning rate.\n", - "ReduceLROnPlateau_factor = 0.985 # ReduceLROnPlateau_factor: ReduceLROnPlateau factor. (Lr = Factor * Lr_prev)\n", + "MAX_LR = 0.001 # MAX_LR: Maximum learning rate.\n", + "DEC_LR = 0.000005 # DEC_LR: Learning rate decay.\n", + "MIN_LR = 0.00005 # MIN_LR: Minimum learning rate.\n", + "RES_LR = 0.0006 # RES_LR: Resuming learning rate.\n", + "ReduceLROnPlateau_factor = 0.9985 # ReduceLROnPlateau_factor: ReduceLROnPlateau factor. (Lr = Factor * Lr_prev)\n", "Use_OneCycleLr = False # Use_OneCycleLr: Use OneCycleLr if True. If False, use ReduceLROnPlateau.\n", "OneCycleLr_UFTS = False # OneCycleLr_UFTS: Set the OneCycleLr max epochs to the estimated full training SUB epochs. 
(DEC_LR and MIN_LR don't have any effect if True)\n", "Debug_OUTPUT_DPS = True # Debug_OUTPUT_DPS: Output debug image samples if True.\n", @@ -10066,7 +10066,7 @@ "# Prep\n", "if RES_Train:\n", " MAX_LR = RES_LR\n", - " PL_epoch = 1\n", + " Stage1_epoch = 1\n", "# VAR\n", "Total_SUB_epoch_C = 0 # TO FIX TensorBoard\n", "CU_LR = MAX_LR\n", @@ -10281,7 +10281,7 @@ " learning_rate_schedule_SUB = OneCycleLr(\n", " max_lr=MAX_LR,\n", " steps_per_epoch=steps_per_epoch_train_SUB,\n", - " epochs=(PL_epoch * subset_epoch) + ((max_EST_epoch - PL_epoch) * subset_epoch_FT),\n", + " epochs=(Stage1_epoch * subset_epoch) + ((max_EST_epoch - Stage1_epoch) * subset_epoch_FT),\n", " )\n", "# ReduceLROnPlateau\n", "if not Use_OneCycleLr:\n", @@ -10342,9 +10342,9 @@ "try:\n", " for epoch in range(1, max_epoch):\n", " # Start Epoch\n", - " STG = \"Learning the patterns\" if epoch < PL_epoch else \"Fine tuning\"\n", - " C_subset_epoch = subset_epoch if epoch < PL_epoch else subset_epoch_FT\n", - " if epoch > PL_epoch and load_SUB_BRW_LMODE_FN:\n", + " STG = \"Stage 1\" if epoch < Stage1_epoch else \"Stage 2\"\n", + " C_subset_epoch = subset_epoch if epoch < Stage1_epoch else subset_epoch_FT\n", + " if epoch > Stage1_epoch and load_SUB_BRW_LMODE_FN:\n", " load_SUB_BRW_LMODE = 1\n", " start_FULL_time = time.time()\n", " if Auto_clear_cache:\n", @@ -10446,9 +10446,9 @@ " tf.summary.image(\"Debug SUB_DP Samples (Normal)\", tensor, step=epoch, max_outputs=4)\n", " del indices, tensor\n", " # learning_rate_schedule_SUB\n", - " if PL_epoch == 0:\n", " CU_LR = MIN_LR\n", - " elif epoch >= PL_epoch and CU_LR > MIN_LR:\n", + " if Stage1_epoch == 0:\n", " CU_LR = MIN_LR\n", + " elif epoch >= Stage1_epoch and CU_LR > MIN_LR:\n", " if (CU_LR - DEC_LR) < MIN_LR:\n", " CU_LR = MIN_LR\n", " else:\n", diff --git a/Data/image_SUB_generator.pkl b/Data/image_SUB_generator.pkl index d51766758515bbd803de4bcc6977503e2ba45854..ce036af631c2ef0e215de3ae7f07d58df7316d97 100644 GIT binary patch delta 38 kcmdnYzL|Z4E;F}-_&f(Fo@~La$+c+ZZ$}88oXzY30Q&U~VgLXD delta 38 kcmdnYzL|Z4E;Dz+vsn&MJlTR-lgstNZ$}88oXzY306Q=cu>b%7
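
A short reading aid, not part of either patch: the second commit renames PL_epoch to Stage1_epoch and retunes the learning-rate constants, and its hunks show a two-stage schedule — hold MAX_LR through Stage 1, then decay linearly to MIN_LR in Stage 2. The sketch below reconstructs that logic in plain Python from the visible +/- lines; the helper name stage_and_lr is invented here, the decrement after the truncated "else:" branch is inferred, and the 489-epoch bound comes from the training log.

    # Constants mirror the patched notebook cell.
    MAX_LR = 0.001       # starting learning rate, held through Stage 1
    DEC_LR = 0.000005    # per-epoch decay once Stage 2 begins
    MIN_LR = 0.00005     # floor the decay clamps to
    Stage1_epoch = 26    # pre-training epochs before decay starts

    def stage_and_lr(epoch: int, cu_lr: float) -> tuple[str, float]:
        """Return the stage label and updated LR for a 1-indexed outer epoch."""
        stage = "Stage 1" if epoch < Stage1_epoch else "Stage 2"
        if Stage1_epoch == 0:                # fine-tuning only: pin to the floor
            return stage, MIN_LR
        if epoch >= Stage1_epoch and cu_lr > MIN_LR:
            return stage, max(cu_lr - DEC_LR, MIN_LR)  # linear decay, clamped
        return stage, cu_lr                  # Stage 1: hold the current LR

    cu_lr = MAX_LR
    for epoch in range(1, 490):              # 489 epochs, as in the training log
        stage, cu_lr = stage_and_lr(epoch, cu_lr)

Under these assumptions the LR stays at 0.001 for epochs 1-25, then loses 5e-6 per outer epoch (each of which trains a 6-epoch subset) until it reaches the 5e-5 floor, which takes (0.001 - 0.00005) / 0.000005 = 190 further epochs.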