diff --git a/.idea/Pneumonia AI Dev.iml b/.idea/Pneumonia AI Dev.iml index 79e38e2..58a6887 100644 --- a/.idea/Pneumonia AI Dev.iml +++ b/.idea/Pneumonia AI Dev.iml @@ -5,7 +5,7 @@ - + diff --git a/.idea/misc.xml b/.idea/misc.xml index 2282695..2ce26da 100644 --- a/.idea/misc.xml +++ b/.idea/misc.xml @@ -3,5 +3,5 @@ - + \ No newline at end of file diff --git a/BETA_E_Model_T&T.ipynb b/BETA_E_Model_T&T.ipynb index 4c16018..50eebdb 100644 --- a/BETA_E_Model_T&T.ipynb +++ b/BETA_E_Model_T&T.ipynb @@ -878,7 +878,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 15, "metadata": { "ExecuteTime": { "end_time": "2023-12-27T17:34:12.077394600Z", @@ -898,14 +898,14 @@ "Freezing 0 layers in the base model...\n", "Percentage of the base model that is frozen: 0.00%\n", "Total model layers: 814\n", - "Model: \"model_1\"\n", + "Model: \"model\"\n", "_____________________________________________________________________________________________________________\n", " Layer (type) Output Shape Param # Connected to Trainable \n", "=============================================================================================================\n", - " input_2 (InputLayer) [(None, 224, 224, 3 0 [] Y \n", + " input_1 (InputLayer) [(None, 224, 224, 3 0 [] Y \n", " )] \n", " \n", - " stem_conv (Conv2D) (None, 112, 112, 64 1728 ['input_2[0][0]'] Y \n", + " stem_conv (Conv2D) (None, 112, 112, 64 1728 ['input_1[0][0]'] Y \n", " ) \n", " \n", " stem_bn (BatchNormalization) (None, 112, 112, 64 256 ['stem_conv[0][0]'] Y \n", @@ -17220,7 +17220,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 16, "metadata": { "ExecuteTime": { "end_time": "2023-12-28T07:04:23.573633300Z", @@ -17235,51 +17235,51 @@ "Training the model...\n", "\u001b[0;33m\n", "Setup Verbose:\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;36mSetting TensorBoard Log dir to \u001b[0m\u001b[0;32m[logs/fit/y2024_m01_d19-h12_m20_s42]\u001b[0m\u001b[0;36m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSetting TensorBoard Log dir to \u001b[0m\u001b[0;32m[logs/fit/y2024_m01_d19-h15_m49_s15]\u001b[0m\u001b[0;36m...\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mUse_extended_tensorboard \u001b[0m\u001b[0;32m[False]\u001b[0m\u001b[0;36m.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mDebug_OUTPUT_DPS \u001b[0m\u001b[0;32m[True]\u001b[0m\u001b[0;36m.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mOneCycleLr_UFTS \u001b[0m\u001b[0;32m[False]\u001b[0m\u001b[0;36m.\u001b[0m\n", "\u001b[0;33mSetup Verbose END.\u001b[0m\n", "\u001b[0m\n", "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m1\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 0)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;33mPreparing train data...\u001b[0m\n", "\u001b[0;33m- Loading fitted ImageDataGenerator...\u001b[0m\n", "\u001b[0;33m- ImageDataGenerator fit done.\u001b[0m\n", "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;31m- Debug DP Sample dir: \u001b[0m\u001b[0;32mSamples/TSR_SUB_400_y2024_m01_d19-h12_m24_s12\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;31m- Debug DP Sample dir: \u001b[0m\u001b[0;32mSamples/TSR_SUB_400_y2024_m01_d19-h15_m56_s42\u001b[0m\n", 
"\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;32mTraining on subset...\u001b[0m\n", "Epoch 1/6\n", - "256/256 [==============================] - 104s 330ms/step - loss: 9.0310 - accuracy: 0.6648 - val_loss: 8.1002 - val_accuracy: 0.4215\n", + "512/512 [==============================] - 174s 296ms/step - loss: 8.3336 - accuracy: 0.7214 - val_loss: 6.3933 - val_accuracy: 0.7196\n", "Epoch 2/6\n", - "256/256 [==============================] - 82s 320ms/step - loss: 6.3373 - accuracy: 0.8291 - val_loss: 5.0278 - val_accuracy: 0.8365\n", + "512/512 [==============================] - 148s 289ms/step - loss: 4.2797 - accuracy: 0.8733 - val_loss: 3.1169 - val_accuracy: 0.5737\n", "Epoch 3/6\n", - "256/256 [==============================] - 80s 311ms/step - loss: 4.0626 - accuracy: 0.8865 - val_loss: 3.3355 - val_accuracy: 0.8317\n", + "512/512 [==============================] - 151s 295ms/step - loss: 1.9219 - accuracy: 0.9091 - val_loss: 1.2688 - val_accuracy: 0.9439\n", "Epoch 4/6\n", - "256/256 [==============================] - 82s 320ms/step - loss: 2.7396 - accuracy: 0.9062 - val_loss: 2.2952 - val_accuracy: 0.9071\n", + "512/512 [==============================] - 149s 291ms/step - loss: 1.0006 - accuracy: 0.9266 - val_loss: 0.9036 - val_accuracy: 0.8686\n", "Epoch 5/6\n", - "256/256 [==============================] - 82s 318ms/step - loss: 2.0226 - accuracy: 0.9250 - val_loss: 1.9101 - val_accuracy: 0.8654\n", + "512/512 [==============================] - 150s 293ms/step - loss: 0.6256 - accuracy: 0.9430 - val_loss: 0.5827 - val_accuracy: 0.9343\n", "Epoch 6/6\n", - "256/256 [==============================] - 81s 317ms/step - loss: 1.7139 - accuracy: 0.9451 - val_loss: 1.8176 - val_accuracy: 0.8830\n", + "512/512 [==============================] - 150s 293ms/step - loss: 0.4802 - accuracy: 0.9585 - val_loss: 0.5241 - val_accuracy: 0.9407\n", "\u001b[0;32mSubset training done.\u001b[0m\n", "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-004-0.9071.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9071\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m2.2952\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mImproved model accuracy from \u001b[0m\u001b[0;32m 0.000000 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m 0.907051\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-003-0.9439.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9439\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m1.2688\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model accuracy from \u001b[0m\u001b[0;32m 0.000000 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m 0.943910\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32minf \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m2.2951622009\u001b[0m\u001b[0;33m. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32minf \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m1.2687988281\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m752.21 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m511.56 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m240.64 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1401.37 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m926.08 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m475.29 \u001b[0m\u001b[0;36msec\u001b[0m\n", "\u001b[0;36m<---------------------------------------|Epoch [1] END|--------------------------------------->\u001b[0m\n", "\u001b[0m\n", "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m2\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 6)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;33mPreparing train data...\u001b[0m\n", "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", @@ -17287,33 +17287,33 @@ "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;32mTraining on subset...\u001b[0m\n", "Epoch 7/12\n", - "256/256 [==============================] - 90s 329ms/step - loss: 2.2516 - accuracy: 0.8831 - val_loss: 1.9359 - val_accuracy: 0.9151\n", + "512/512 [==============================] - 158s 296ms/step - loss: 1.2318 - accuracy: 0.9022 - val_loss: 1.0153 - val_accuracy: 0.9327\n", "Epoch 8/12\n", - "256/256 [==============================] - 81s 316ms/step - loss: 1.7008 - accuracy: 0.9006 - val_loss: 1.3691 - val_accuracy: 0.9183\n", + "512/512 [==============================] - 150s 294ms/step - loss: 0.8009 - accuracy: 0.9153 - val_loss: 0.5574 - val_accuracy: 0.9519\n", "Epoch 9/12\n", - "256/256 [==============================] - 82s 320ms/step - loss: 1.1890 - accuracy: 0.9211 - val_loss: 0.9839 - val_accuracy: 0.9359\n", + "512/512 [==============================] - 149s 290ms/step - loss: 0.5007 - accuracy: 0.9220 - val_loss: 0.3572 - val_accuracy: 0.9439\n", "Epoch 10/12\n", - "256/256 [==============================] - 81s 317ms/step - loss: 0.8683 - accuracy: 0.9304 - val_loss: 0.8350 - val_accuracy: 0.9006\n", + "512/512 [==============================] - 148s 288ms/step - loss: 0.3362 - accuracy: 0.9366 - val_loss: 0.2918 - val_accuracy: 0.9423\n", "Epoch 11/12\n", - "256/256 [==============================] - 81s 314ms/step - loss: 0.6658 - accuracy: 0.9451 - val_loss: 0.6520 - val_accuracy: 0.9327\n", + "512/512 [==============================] - 149s 291ms/step - loss: 0.2234 - accuracy: 0.9583 - val_loss: 0.2562 - val_accuracy: 0.9439\n", "Epoch 
12/12\n", - "256/256 [==============================] - 82s 320ms/step - loss: 0.5736 - accuracy: 0.9578 - val_loss: 0.6282 - val_accuracy: 0.9375\n", + "512/512 [==============================] - 148s 288ms/step - loss: 0.1877 - accuracy: 0.9647 - val_loss: 0.2602 - val_accuracy: 0.9423\n", "\u001b[0;32mSubset training done.\u001b[0m\n", "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-012-0.9375.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9375\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.6283\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mImproved model accuracy from \u001b[0m\u001b[0;32m 0.907051 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m 0.937500\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-008-0.9519.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9519\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.5574\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model accuracy from \u001b[0m\u001b[0;32m 0.943910 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m 0.951923\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32m2.2951622009 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m0.6282643080\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32m1.2687988281 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m0.5573834181\u001b[0m\u001b[0;33m. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m731.83 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m499.27 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m232.57 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1376.48 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m904.61 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m471.87 \u001b[0m\u001b[0;36msec\u001b[0m\n", "\u001b[0;36m<---------------------------------------|Epoch [2] END|--------------------------------------->\u001b[0m\n", "\u001b[0m\n", "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m3\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 12)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;33mPreparing train data...\u001b[0m\n", "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", @@ -17321,33 +17321,33 @@ "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;32mTraining on subset...\u001b[0m\n", "Epoch 13/18\n", - "256/256 [==============================] - 84s 308ms/step - loss: 0.6855 - accuracy: 0.9084 - val_loss: 0.6029 - val_accuracy: 0.9119\n", + "512/512 [==============================] - 159s 297ms/step - loss: 0.5912 - accuracy: 0.9153 - val_loss: 0.4631 - val_accuracy: 0.9551\n", "Epoch 14/18\n", - "256/256 [==============================] - 81s 314ms/step - loss: 0.5666 - accuracy: 0.9221 - val_loss: 0.7169 - val_accuracy: 0.8013\n", + "512/512 [==============================] - 149s 291ms/step - loss: 0.4416 - accuracy: 0.9202 - val_loss: 0.3531 - val_accuracy: 0.9231\n", "Epoch 15/18\n", - "256/256 [==============================] - 81s 315ms/step - loss: 0.4459 - accuracy: 0.9304 - val_loss: 0.4446 - val_accuracy: 0.9022\n", + "512/512 [==============================] - 150s 292ms/step - loss: 0.3171 - accuracy: 0.9308 - val_loss: 0.2436 - val_accuracy: 0.9471\n", "Epoch 16/18\n", - "256/256 [==============================] - 90s 351ms/step - loss: 0.3490 - accuracy: 0.9395 - val_loss: 0.4176 - val_accuracy: 0.8878\n", + "512/512 [==============================] - 149s 290ms/step - loss: 0.2339 - accuracy: 0.9451 - val_loss: 0.2381 - val_accuracy: 0.9343\n", "Epoch 17/18\n", - "256/256 [==============================] - 84s 328ms/step - loss: 0.2697 - accuracy: 0.9553 - val_loss: 0.3256 - val_accuracy: 0.9423\n", + "512/512 [==============================] - 150s 292ms/step - loss: 0.1554 - accuracy: 0.9635 - val_loss: 0.2351 - val_accuracy: 0.9455\n", "Epoch 18/18\n", - "256/256 [==============================] - 81s 317ms/step - loss: 0.2323 - accuracy: 0.9663 - val_loss: 0.3129 - val_accuracy: 0.9343\n", + "512/512 [==============================] - 150s 
292ms/step - loss: 0.1194 - accuracy: 0.9719 - val_loss: 0.2653 - val_accuracy: 0.9439\n", "\u001b[0;32mSubset training done.\u001b[0m\n", "\u001b[0;33mLoading the best weights...\u001b[0m\n", - "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-017-0.9423.h5...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9423\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.3256\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mImproved model accuracy from \u001b[0m\u001b[0;32m 0.937500 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m 0.942308\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-013-0.9551.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9551\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.4631\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model accuracy from \u001b[0m\u001b[0;32m 0.951923 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m 0.955128\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32m0.6282643080 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m0.3256246150\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32m0.5573834181 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m0.4630721211\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m689.56 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m501.80 \u001b[0m\u001b[0;36msec\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m187.75 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1423.39 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m908.12 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m515.26 \u001b[0m\u001b[0;36msec\u001b[0m\n", "\u001b[0;36m<---------------------------------------|Epoch [3] END|--------------------------------------->\u001b[0m\n", "\u001b[0m\n", "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m4\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 18)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", - "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|4096|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;33mPreparing train data...\u001b[0m\n", "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", @@ -17355,9 +17355,403 @@ "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", "\u001b[0;32mTraining on subset...\u001b[0m\n", "Epoch 19/24\n", - "256/256 [==============================] - 
95s 339ms/step - loss: 0.3673 - accuracy: 0.9138 - val_loss: 0.3421 - val_accuracy: 0.8974\n", + "512/512 [==============================] - 154s 290ms/step - loss: 0.4759 - accuracy: 0.9226 - val_loss: 0.3878 - val_accuracy: 0.9503\n", "Epoch 20/24\n", - " 21/256 [=>............................] - ETA: 1:01 - loss: 0.3448 - accuracy: 0.9196" + "512/512 [==============================] - 146s 285ms/step - loss: 0.3835 - accuracy: 0.9261 - val_loss: 0.2619 - val_accuracy: 0.9519\n", + "Epoch 21/24\n", + "512/512 [==============================] - 148s 289ms/step - loss: 0.2742 - accuracy: 0.9390 - val_loss: 0.2484 - val_accuracy: 0.9439\n", + "Epoch 22/24\n", + "512/512 [==============================] - 148s 290ms/step - loss: 0.2090 - accuracy: 0.9501 - val_loss: 0.2298 - val_accuracy: 0.9407\n", + "Epoch 23/24\n", + "512/512 [==============================] - 147s 287ms/step - loss: 0.1471 - accuracy: 0.9642 - val_loss: 0.2666 - val_accuracy: 0.9423\n", + "Epoch 24/24\n", + "512/512 [==============================] - 147s 286ms/step - loss: 0.1054 - accuracy: 0.9786 - val_loss: 0.2464 - val_accuracy: 0.9503\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-020-0.9519.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9519\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.2619\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9551281929. Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32m0.4630721211 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m0.2619114518\u001b[0m\u001b[0;33m. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1355.36 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m892.69 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m462.66 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [4] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m5\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 24)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 25/30\n", + "512/512 [==============================] - 154s 290ms/step - loss: 0.3177 - accuracy: 0.9240 - val_loss: 0.2506 - val_accuracy: 0.9471\n", + "Epoch 26/30\n", + "512/512 [==============================] - 146s 284ms/step - loss: 0.2765 - accuracy: 0.9309 - val_loss: 0.2564 - val_accuracy: 0.9135\n", + "Epoch 27/30\n", + "512/512 [==============================] - 145s 282ms/step - loss: 0.2223 - accuracy: 0.9376 - val_loss: 0.1866 - val_accuracy: 0.9407\n", + "Epoch 28/30\n", + "512/512 [==============================] - 144s 281ms/step - loss: 0.1703 - accuracy: 0.9481 - val_loss: 0.1827 - val_accuracy: 0.9423\n", + "Epoch 29/30\n", + "512/512 [==============================] - 145s 283ms/step - loss: 0.1234 - accuracy: 0.9646 - val_loss: 0.1707 - val_accuracy: 0.9535\n", + "Epoch 30/30\n", + "512/512 [==============================] - 144s 280ms/step - loss: 0.0869 - accuracy: 0.9774 - val_loss: 0.1856 - val_accuracy: 0.9535\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-029-0.9535.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9535\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1707\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9551281929. Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32m0.2619114518 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m0.1706935465\u001b[0m\u001b[0;33m. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1286.65 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m879.57 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m407.09 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [5] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m6\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 30)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 31/36\n", + "512/512 [==============================] - 153s 288ms/step - loss: 0.2210 - accuracy: 0.9341 - val_loss: 0.1851 - val_accuracy: 0.9359\n", + "Epoch 32/36\n", + "512/512 [==============================] - 146s 284ms/step - loss: 0.1968 - accuracy: 0.9366 - val_loss: 0.1723 - val_accuracy: 0.9455\n", + "Epoch 33/36\n", + "512/512 [==============================] - 146s 285ms/step - loss: 0.1653 - accuracy: 0.9468 - val_loss: 0.1958 - val_accuracy: 0.9599\n", + "Epoch 34/36\n", + "512/512 [==============================] - 144s 281ms/step - loss: 0.1259 - accuracy: 0.9640 - val_loss: 0.1691 - val_accuracy: 0.9471\n", + "Epoch 35/36\n", + "512/512 [==============================] - 145s 282ms/step - loss: 0.0974 - accuracy: 0.9690 - val_loss: 0.1530 - val_accuracy: 0.9567\n", + "Epoch 36/36\n", + "512/512 [==============================] - 144s 281ms/step - loss: 0.0638 - accuracy: 0.9813 - val_loss: 0.1830 - val_accuracy: 0.9503\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-033-0.9599.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9599\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1958\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model accuracy from \u001b[0m\u001b[0;32m 0.955128 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m 0.959936\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.1706935465. 
Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1274.18 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m879.48 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m394.71 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [6] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m7\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 36)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 37/42\n", + "512/512 [==============================] - 153s 288ms/step - loss: 0.1881 - accuracy: 0.9398 - val_loss: 0.1476 - val_accuracy: 0.9519\n", + "Epoch 38/42\n", + "512/512 [==============================] - 145s 282ms/step - loss: 0.1715 - accuracy: 0.9441 - val_loss: 0.1809 - val_accuracy: 0.9455\n", + "Epoch 39/42\n", + "512/512 [==============================] - 144s 281ms/step - loss: 0.1476 - accuracy: 0.9564 - val_loss: 0.3669 - val_accuracy: 0.9247\n", + "Epoch 40/42\n", + "512/512 [==============================] - 144s 281ms/step - loss: 0.1108 - accuracy: 0.9658 - val_loss: 0.1840 - val_accuracy: 0.9423\n", + "Epoch 41/42\n", + "512/512 [==============================] - 145s 283ms/step - loss: 0.0762 - accuracy: 0.9796 - val_loss: 0.1843 - val_accuracy: 0.9551\n", + "Epoch 42/42\n", + "512/512 [==============================] - 144s 281ms/step - loss: 0.0526 - accuracy: 0.9854 - val_loss: 0.1974 - val_accuracy: 0.9503\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-041-0.9551.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9551\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1842\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9599359035. Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.1706935465. 
Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1249.93 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m876.85 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m373.08 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [7] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m8\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 42)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 43/48\n", + "512/512 [==============================] - 154s 290ms/step - loss: 0.1798 - accuracy: 0.9392 - val_loss: 0.2040 - val_accuracy: 0.9439\n", + "Epoch 44/48\n", + "512/512 [==============================] - 147s 286ms/step - loss: 0.1609 - accuracy: 0.9443 - val_loss: 0.2329 - val_accuracy: 0.9519\n", + "Epoch 45/48\n", + "512/512 [==============================] - 145s 283ms/step - loss: 0.1467 - accuracy: 0.9528 - val_loss: 0.1690 - val_accuracy: 0.9503\n", + "Epoch 46/48\n", + "512/512 [==============================] - 146s 285ms/step - loss: 0.1168 - accuracy: 0.9636 - val_loss: 0.1768 - val_accuracy: 0.9551\n", + "Epoch 47/48\n", + "512/512 [==============================] - 145s 282ms/step - loss: 0.0739 - accuracy: 0.9799 - val_loss: 0.1621 - val_accuracy: 0.9519\n", + "Epoch 48/48\n", + "512/512 [==============================] - 144s 282ms/step - loss: 0.0514 - accuracy: 0.9838 - val_loss: 0.1962 - val_accuracy: 0.9551\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-046-0.9551.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9551\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1768\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9599359035. Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.1706935465. 
Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1293.50 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m882.68 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m410.82 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [8] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m9\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 48)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 49/54\n", + "512/512 [==============================] - 152s 287ms/step - loss: 0.1787 - accuracy: 0.9412 - val_loss: 0.1736 - val_accuracy: 0.9535\n", + "Epoch 50/54\n", + "512/512 [==============================] - 141s 275ms/step - loss: 0.1598 - accuracy: 0.9476 - val_loss: 0.1442 - val_accuracy: 0.9535\n", + "Epoch 51/54\n", + "512/512 [==============================] - 140s 273ms/step - loss: 0.1240 - accuracy: 0.9606 - val_loss: 0.2137 - val_accuracy: 0.9439\n", + "Epoch 52/54\n", + "512/512 [==============================] - 141s 274ms/step - loss: 0.0987 - accuracy: 0.9714 - val_loss: 0.1714 - val_accuracy: 0.9423\n", + "Epoch 53/54\n", + "512/512 [==============================] - 141s 275ms/step - loss: 0.0613 - accuracy: 0.9807 - val_loss: 0.1642 - val_accuracy: 0.9631\n", + "Epoch 54/54\n", + "512/512 [==============================] - 140s 272ms/step - loss: 0.0413 - accuracy: 0.9888 - val_loss: 0.1627 - val_accuracy: 0.9631\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-053-0.9631.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9631\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1642\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model accuracy from \u001b[0m\u001b[0;32m 0.959936 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m 0.963141\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32m0.1706935465 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m0.1641677022\u001b[0m\u001b[0;33m. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1278.21 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m856.87 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m421.34 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [9] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m10\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 54)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 55/60\n", + "512/512 [==============================] - 149s 281ms/step - loss: 0.1624 - accuracy: 0.9475 - val_loss: 0.1410 - val_accuracy: 0.9647\n", + "Epoch 56/60\n", + "512/512 [==============================] - 141s 275ms/step - loss: 0.1416 - accuracy: 0.9535 - val_loss: 0.1496 - val_accuracy: 0.9359\n", + "Epoch 57/60\n", + "512/512 [==============================] - 150s 292ms/step - loss: 0.1101 - accuracy: 0.9644 - val_loss: 0.1470 - val_accuracy: 0.9551\n", + "Epoch 58/60\n", + "512/512 [==============================] - 149s 290ms/step - loss: 0.0803 - accuracy: 0.9734 - val_loss: 0.2410 - val_accuracy: 0.9471\n", + "Epoch 59/60\n", + "512/512 [==============================] - 147s 287ms/step - loss: 0.0518 - accuracy: 0.9852 - val_loss: 0.1664 - val_accuracy: 0.9551\n", + "Epoch 60/60\n", + "512/512 [==============================] - 150s 294ms/step - loss: 0.0294 - accuracy: 0.9916 - val_loss: 0.1992 - val_accuracy: 0.9551\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-055-0.9647.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9647\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1410\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model accuracy from \u001b[0m\u001b[0;32m 0.963141 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m 0.964744\u001b[0m\u001b[0;33m. \u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32m0.1641677022 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m0.1409760416\u001b[0m\u001b[0;33m. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1312.39 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m888.51 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m423.88 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [10] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m11\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 60)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 61/66\n", + "512/512 [==============================] - 161s 302ms/step - loss: 0.1616 - accuracy: 0.9456 - val_loss: 0.2287 - val_accuracy: 0.9519\n", + "Epoch 62/66\n", + "512/512 [==============================] - 152s 296ms/step - loss: 0.1512 - accuracy: 0.9504 - val_loss: 0.1797 - val_accuracy: 0.9279\n", + "Epoch 63/66\n", + "512/512 [==============================] - 151s 295ms/step - loss: 0.1266 - accuracy: 0.9572 - val_loss: 0.1695 - val_accuracy: 0.9455\n", + "Epoch 64/66\n", + "512/512 [==============================] - 151s 295ms/step - loss: 0.0952 - accuracy: 0.9694 - val_loss: 0.2256 - val_accuracy: 0.9487\n", + "Epoch 65/66\n", + "512/512 [==============================] - 152s 297ms/step - loss: 0.0605 - accuracy: 0.9823 - val_loss: 0.1942 - val_accuracy: 0.9583\n", + "Epoch 66/66\n", + "512/512 [==============================] - 152s 297ms/step - loss: 0.0407 - accuracy: 0.9891 - val_loss: 0.1949 - val_accuracy: 0.9599\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-066-0.9599.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9615\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1949\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9647436142. Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.1409760416. 
Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1488.80 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m922.13 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m566.67 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [11] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m12\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 66)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 67/72\n", + "512/512 [==============================] - 162s 303ms/step - loss: 0.1593 - accuracy: 0.9509 - val_loss: 0.1646 - val_accuracy: 0.9599\n", + "Epoch 68/72\n", + "512/512 [==============================] - 152s 297ms/step - loss: 0.1379 - accuracy: 0.9552 - val_loss: 0.1463 - val_accuracy: 0.9535\n", + "Epoch 69/72\n", + "512/512 [==============================] - 152s 295ms/step - loss: 0.1148 - accuracy: 0.9674 - val_loss: 0.2137 - val_accuracy: 0.9391\n", + "Epoch 70/72\n", + "512/512 [==============================] - 151s 294ms/step - loss: 0.0876 - accuracy: 0.9738 - val_loss: 0.1618 - val_accuracy: 0.9503\n", + "Epoch 71/72\n", + "512/512 [==============================] - 151s 295ms/step - loss: 0.0533 - accuracy: 0.9835 - val_loss: 0.1815 - val_accuracy: 0.9471\n", + "Epoch 72/72\n", + "512/512 [==============================] - 151s 294ms/step - loss: 0.0367 - accuracy: 0.9904 - val_loss: 0.1962 - val_accuracy: 0.9487\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-067-0.9599.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9599\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1646\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9647436142. Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.1409760416. 
Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1489.11 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m920.11 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m569.00 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [12] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m13\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 72)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 73/78\n", + "512/512 [==============================] - 163s 304ms/step - loss: 0.1386 - accuracy: 0.9541 - val_loss: 0.2616 - val_accuracy: 0.9407\n", + "Epoch 74/78\n", + "512/512 [==============================] - 153s 298ms/step - loss: 0.1357 - accuracy: 0.9564 - val_loss: 0.1462 - val_accuracy: 0.9567\n", + "Epoch 75/78\n", + "512/512 [==============================] - 152s 296ms/step - loss: 0.1073 - accuracy: 0.9652 - val_loss: 0.1498 - val_accuracy: 0.9583\n", + "Epoch 76/78\n", + "512/512 [==============================] - 152s 297ms/step - loss: 0.0716 - accuracy: 0.9784 - val_loss: 0.1459 - val_accuracy: 0.9599\n", + "Epoch 77/78\n", + "512/512 [==============================] - 151s 294ms/step - loss: 0.0506 - accuracy: 0.9854 - val_loss: 0.1873 - val_accuracy: 0.9551\n", + "Epoch 78/78\n", + "512/512 [==============================] - 150s 291ms/step - loss: 0.0402 - accuracy: 0.9889 - val_loss: 0.2123 - val_accuracy: 0.9471\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-076-0.9599.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9583\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1459\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9647436142. Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.1409760416. 
Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1506.91 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m922.24 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m584.67 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [13] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m14\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 78)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 79/84\n", + "512/512 [==============================] - 156s 293ms/step - loss: 0.1629 - accuracy: 0.9464 - val_loss: 0.1285 - val_accuracy: 0.9615\n", + "Epoch 80/84\n", + "512/512 [==============================] - 147s 286ms/step - loss: 0.1460 - accuracy: 0.9513 - val_loss: 0.1695 - val_accuracy: 0.9519\n", + "Epoch 81/84\n", + "512/512 [==============================] - 146s 285ms/step - loss: 0.1021 - accuracy: 0.9669 - val_loss: 0.1721 - val_accuracy: 0.9455\n", + "Epoch 82/84\n", + "512/512 [==============================] - 147s 286ms/step - loss: 0.0818 - accuracy: 0.9734 - val_loss: 0.1511 - val_accuracy: 0.9503\n", + "Epoch 83/84\n", + "512/512 [==============================] - 145s 283ms/step - loss: 0.0500 - accuracy: 0.9843 - val_loss: 0.1676 - val_accuracy: 0.9455\n", + "Epoch 84/84\n", + "512/512 [==============================] - 145s 282ms/step - loss: 0.0390 - accuracy: 0.9895 - val_loss: 0.2092 - val_accuracy: 0.9583\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-079-0.9615.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9615\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1285\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9647436142. Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mImproved model loss from \u001b[0m\u001b[0;32m0.1409760416 \u001b[0m\u001b[0;33mto \u001b[0m\u001b[0;32m0.1285299212\u001b[0m\u001b[0;33m. 
\u001b[0m\u001b[0;96mSaving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;36mSaving full model H5 format...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1394.21 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m887.06 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m507.15 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [14] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m15\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 84)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 85/90\n", + "512/512 [==============================] - 155s 293ms/step - loss: 0.1445 - accuracy: 0.9534 - val_loss: 0.1528 - val_accuracy: 0.9471\n", + "Epoch 86/90\n", + "512/512 [==============================] - 148s 289ms/step - loss: 0.1381 - accuracy: 0.9539 - val_loss: 0.1298 - val_accuracy: 0.9583\n", + "Epoch 87/90\n", + "512/512 [==============================] - 146s 285ms/step - loss: 0.1036 - accuracy: 0.9672 - val_loss: 0.1514 - val_accuracy: 0.9535\n", + "Epoch 88/90\n", + "512/512 [==============================] - 147s 286ms/step - loss: 0.0726 - accuracy: 0.9781 - val_loss: 0.2329 - val_accuracy: 0.9455\n", + "Epoch 89/90\n", + "512/512 [==============================] - 146s 285ms/step - loss: 0.0519 - accuracy: 0.9857 - val_loss: 0.1476 - val_accuracy: 0.9535\n", + "Epoch 90/90\n", + "512/512 [==============================] - 146s 285ms/step - loss: 0.0307 - accuracy: 0.9916 - val_loss: 0.1609 - val_accuracy: 0.9535\n", + "\u001b[0;32mSubset training done.\u001b[0m\n", + "\u001b[0;33mLoading the best weights...\u001b[0m\n", + "\u001b[0;33mLoading weights from file cache\\model_SUB_checkpoint-086-0.9583.h5...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test acc: \u001b[0m\u001b[0;32m0.9583\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mModel Test loss: \u001b[0m\u001b[0;32m0.1298\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel accuracy did not improve from 0.9647436142. Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;91mModel loss did not improve from 0.1285299212. 
Not saving model.\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(FULL): \u001b[0m\u001b[0;32m1318.74 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(SUBo): \u001b[0m\u001b[0;32m890.47 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTime taken for epoch(OTHERo): \u001b[0m\u001b[0;32m428.26 \u001b[0m\u001b[0;36msec\u001b[0m\n", + "\u001b[0;36m<---------------------------------------|Epoch [15] END|--------------------------------------->\u001b[0m\n", + "\u001b[0m\n", + "\u001b[0m\u001b[0mEpoch: \u001b[0m\u001b[0;36m16\u001b[0m\u001b[0m/\u001b[0m\u001b[0;32m384 (TSEC: 90)\u001b[0m\u001b[0;34m | \u001b[0m\u001b[0;32m[Learning the patterns]\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mTaking a subset of \u001b[0m\u001b[0;32m[|8192|AdvSubset:True]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;33mPreparing train data...\u001b[0m\n", + "\u001b[0;33m- Augmenting Image Data...\u001b[0m\n", + "\u001b[0;33m- Normalizing Image Data...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training OneCycleLr::maxlr to \u001b[0m\u001b[0;32m[0.01\u001b[0m\u001b[0;31m\u001b[0m\u001b[0;32m]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0m\u001b[0m\u001b[0;33mSetting training subset epoch.c to \u001b[0m\u001b[0;32m[6]\u001b[0m\u001b[0;33m...\u001b[0m\n", + "\u001b[0;32mTraining on subset...\u001b[0m\n", + "Epoch 91/96\n", + "512/512 [==============================] - 154s 291ms/step - loss: 0.1397 - accuracy: 0.9529 - val_loss: 0.1493 - val_accuracy: 0.9551\n", + "Epoch 92/96\n", + "512/512 [==============================] - 146s 285ms/step - loss: 0.1301 - accuracy: 0.9590 - val_loss: 0.1776 - val_accuracy: 0.9391\n", + "Epoch 93/96\n", + "288/512 [===============>..............] - ETA: 59s - loss: 0.0972 - accuracy: 0.9714" ] } ], @@ -17372,10 +17766,10 @@ "subset_epoch = 6 # subset_epoch: Number of epochs to train each subset.\n", "subset_epoch_FT = 6 # subset_epoch_FT: subset_epoch after pre-training epochs.\n", "PL_epoch = 26 # PL_epoch: Number of pre-training epochs. Use >=24 for large models or 0/1 for fine-tuning only. Common values: 8, 16, 26, 32, 64, 128.\n", - "subset_size = 4096 # subset_size: Size of each training subset. Common values: 512, 1024, 2048, 3200, 4096, 8192.\n", + "subset_size = 8192 # subset_size: Size of each training subset. Common values: 512, 1024, 2048, 3200, 4096, 8192.\n", "Conf_batch_size_REV2 = 16 # Conf_batch_size_REV2: Batch size.\n", "RES_Train = False # RES_Train: Resume training if True.\n", - "MAX_LR = 0.01 # MAX_LR: Maximum learning rate. Common values: 0.011, 0.01, 0.001.\n", + "MAX_LR = 0.011 # MAX_LR: Maximum learning rate. Common values: 0.011, 0.01, 0.001.\n", "DEC_LR = 0.00003 # DEC_LR: Learning rate decay.\n", "MIN_LR = 0.0005 # MIN_LR: Minimum learning rate.\n", "RES_LR = 0.006 # RES_LR: Resuming learning rate.\n",
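
Note for readers of this diff: the training log above is produced by a subset-training loop whose tunables appear in the config cell at the end of the hunk (subset_size, subset_epoch, Conf_batch_size_REV2, MAX_LR, and so on). The sketch below is a minimal, hypothetical illustration of such a loop, not the notebook's actual implementation. It assumes a compiled tf.keras model (with an "accuracy" metric, so val_accuracy exists) and in-memory x_train/y_train/x_val/y_val arrays, and it only mirrors the behaviour visible in the log: draw a fresh random subset each outer epoch, train it for subset_epoch inner epochs with continuing Keras epoch numbers, checkpoint on val_accuracy, then reload the best weights. The OneCycleLr callback, the data augmentation/normalization steps, and the full-model H5 saving from the real notebook are omitted; all function and variable names here are assumptions for illustration.

    import numpy as np
    import tensorflow as tf

    subset_size = 8192   # matches the "|8192|AdvSubset:True" lines in the log
    subset_epoch = 6     # inner epochs per subset ("Setting training subset epoch.c to [6]")
    batch_size = 16      # Conf_batch_size_REV2 in the config cell

    def train_on_subsets(model, x_train, y_train, x_val, y_val, outer_epochs):
        """Hypothetical outer loop: one random subset per outer epoch."""
        ckpt_path = "cache/model_SUB_checkpoint.h5"
        for outer in range(outer_epochs):
            # "Taking a subset of [|8192|...]": sample without replacement from the full pool.
            idx = np.random.choice(len(x_train), size=subset_size, replace=False)
            ckpt = tf.keras.callbacks.ModelCheckpoint(
                ckpt_path, monitor="val_accuracy",
                save_best_only=True, save_weights_only=True)
            # initial_epoch/epochs reproduce the continuing numbering seen in the log
            # (Epoch 7/12, 13/18, ...).
            model.fit(x_train[idx], y_train[idx],
                      validation_data=(x_val, y_val),
                      initial_epoch=outer * subset_epoch,
                      epochs=(outer + 1) * subset_epoch,
                      batch_size=batch_size,
                      callbacks=[ckpt])
            # "Loading the best weights...": restore the best subset checkpoint
            # before the next outer epoch.
            model.load_weights(ckpt_path)
        return model

Keeping the checkpoint/reload step inside the outer loop is what makes the per-epoch "Model Test acc/loss" lines in the log refer to the best inner epoch of that subset rather than the last one; the doubled subset_size (4096 to 8192) and the MAX_LR change (0.01 to 0.011) in this commit only alter the values fed into that loop, not its structure.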