Alpha b #225

Merged · 10 commits · Mar 19, 2024
4 changes: 2 additions & 2 deletions .gitignore
@@ -100,5 +100,5 @@ Samples/*

# Exclude backup load
/Backup_*_COPY.ipynb
-/ruff_check_results.txt
-/ruff_format_results.txt
+/ruff_check_results.txt
+/ruff_format_results.txt
34 changes: 17 additions & 17 deletions BETA_E_Model_T&T.ipynb
@@ -83,8 +83,8 @@
"import tensorflow as tf\n",
"from keras.models import Model\n",
"import matplotlib.pyplot as plt\n",
"from keras.optimizers import * # noqa: F403\n",
"from adabelief_tf import AdaBeliefOptimizer # noqa: F401\n",
"from keras.optimizers import * # noqa: F403\n",
"from adabelief_tf import AdaBeliefOptimizer # noqa: F401\n",
"\n",
"# from tensorflow_addons.optimizers import Yogi\n",
"from imblearn.over_sampling import SMOTE\n",
@@ -837,7 +837,7 @@
"model = Model(inputs=EfficientNet_M.inputs, outputs=EfficientNet_M.outputs)\n",
"\n",
"# compile model\n",
"opt = SGD(momentum=0.9)\n",
"opt = SGD(momentum=0.9) # noqa: F405\n",
"# opt = SGD(learning_rate=0.008, momentum=0.85, decay=0.001)\n",
"# opt = Adam()\n",
"model.compile(optimizer=opt, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n",
@@ -880,7 +880,7 @@
"model = Model(inputs=ConvNeXtLarge_M.inputs, outputs=ConvNeXtLarge_M.outputs)\n",
"\n",
"# compile model\n",
"opt = SGD(momentum=0.9)\n",
"opt = SGD(momentum=0.9) # noqa: F405\n",
"# opt = SGD(learning_rate=0.008, momentum=0.85, decay=0.001)\n",
"# opt = Adam()\n",
"model.compile(optimizer=opt, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n",
@@ -3128,7 +3128,7 @@
" # opt = Adamax() # noqa: F405\n",
" # opt = RMSprop(momentum=0.9) # noqa: F405\n",
" # opt = Adagrad() # noqa: F405\n",
<
-" opt = AdaBeliefOptimizer(epsilon=1e-7, rectify=False, weight_decay=5e-4, print_change_log=False, amsgrad=False) # noqa: F405\n",
+" opt = AdaBeliefOptimizer(epsilon=1e-7, rectify=False, weight_decay=5e-4, print_change_log=False, amsgrad=False) # noqa: F405\n",
" # opt = Yogi() # noqa: F405\n",
" model_EfficientNetB7_NS.compile(\n",
" optimizer=opt, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]\n",
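<
AdaBelief is the one non-Keras optimizer in the candidate list, so its configuration is worth spelling out. A hedged sketch of the construction used above (every parameter appears verbatim in the cell; the `adabelief-tf` package is assumed installed):

```python
from adabelief_tf import AdaBeliefOptimizer

opt = AdaBeliefOptimizer(
    epsilon=1e-7,            # small constant for numerical stability
    rectify=False,           # disable RAdam-style variance rectification
    weight_decay=5e-4,       # decoupled weight decay
    print_change_log=False,  # silence the package's startup message
    amsgrad=False,           # plain AdaBelief, no AMSGrad max-tracking
)
# Passed to model_EfficientNetB7_NS.compile(...) exactly as in the cell above.
```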
@@ -3228,7 +3228,7 @@
" print(\"Total model layers: \", len(combo_model.layers))\n",
"\n",
" # OPT/compile\n",
<
-" opt = SGD(momentum=0.9)\n",
+" opt = SGD(momentum=0.9) # noqa: F405\n",
" combo_model.compile(optimizer=opt, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n",
"\n",
" return combo_model\n",
@@ -7425,7 +7425,7 @@
"model = Model(inputs=EfficientNet_M.inputs, outputs=EfficientNet_M.outputs)\n",
"\n",
"# compile model\n",
"opt = SGD(momentum=0.9)\n",
"opt = SGD(momentum=0.9) # noqa: F405\n",
"# opt = AdaBeliefOptimizer(epsilon=1e-7, rectify=False, weight_decay=1e-2, print_change_log=False, total_steps=0, amsgrad=False)\n",
"# opt = AdaBeliefOptimizer(epsilon=1e-7, rectify=False, weight_decay=1e-3)\n",
"# opt = Adam()\n",
@@ -8764,7 +8764,7 @@
" model_EfficientNetB4_NS = Model(inputs=base_model.input, outputs=predictions)\n",
" print(\"Total model layers: \", len(model_EfficientNetB4_NS.layers))\n",
" # OPT/compile\n",
<
-" opt = SGD(momentum=0.95, nesterov=False)\n",
+" opt = SGD(momentum=0.95, nesterov=False) # noqa: F405\n",
" # opt = Nadam()\n",
" # opt = Adamax()\n",
" # opt = RMSprop(momentum=0.9)\n",
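<
The EfficientNetB4 cell follows the standard transfer-learning recipe: take `base_model`, attach a classification head, and compile with non-Nesterov momentum SGD (the B0 and B7 cells below vary only the momentum value). A minimal sketch under assumed head layers and class count, since the diff does not show the real head:

```python
import tensorflow as tf
from keras.models import Model
from keras.layers import Dense, Dropout, GlobalAveragePooling2D
from keras.optimizers import SGD

# Assumed stand-ins: weights=None keeps the sketch offline-friendly,
# and the 2-class softmax head is illustrative only.
base_model = tf.keras.applications.EfficientNetB4(include_top=False, weights=None)
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.3)(x)
predictions = Dense(2, activation="softmax")(x)

model_EfficientNetB4_NS = Model(inputs=base_model.input, outputs=predictions)
print("Total model layers: ", len(model_EfficientNetB4_NS.layers))

opt = SGD(momentum=0.95, nesterov=False)
model_EfficientNetB4_NS.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
```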
@@ -8851,7 +8851,7 @@
" model_EfficientNetB0_NS = Model(inputs=base_model.input, outputs=predictions)\n",
" print(\"Total model layers: \", len(model_EfficientNetB0_NS.layers))\n",
" # OPT/compile\n",
<
-" opt = SGD(momentum=0.92, nesterov=False)\n",
+" opt = SGD(momentum=0.92, nesterov=False) # noqa: F405\n",
" # opt = Nadam()\n",
" # opt = Adamax()\n",
" # opt = RMSprop(momentum=0.9)\n",
@@ -8923,7 +8923,7 @@
" model_EfficientNetB7_NS = Model(inputs=base_model.input, outputs=predictions)\n",
" print(\"Total model layers: \", len(model_EfficientNetB7_NS.layers))\n",
" # OPT/compile\n",
<
-" opt = SGD(momentum=0.9)\n",
+" opt = SGD(momentum=0.9) # noqa: F405\n",
" # opt = Yogi()\n",
" model_EfficientNetB7_NS.compile(optimizer=opt, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n",
"\n",
@@ -8998,7 +8998,7 @@
" model_EfficientNetB7_NS = Model(inputs=base_model.input, outputs=predictions)\n",
" print(\"Total model layers: \", len(model_EfficientNetB7_NS.layers))\n",
" # OPT/compile\n",
<
-" opt = SGD(momentum=0.9, nesterov=False)\n",
+" opt = SGD(momentum=0.9, nesterov=False) # noqa: F405\n",
" # opt = Nadam()\n",
" # opt = Adamax()\n",
" # opt = RMSprop(momentum=0.9)\n",
@@ -9063,7 +9063,7 @@
" model_EfficientNetB7_NS = Model(inputs=input, outputs=predictions)\n",
" print(\"Total model layers: \", len(model_EfficientNetB7_NS.layers))\n",
" # OPT/compile\n",
<
-" opt = SGD(momentum=0.9)\n",
+" opt = SGD(momentum=0.9) # noqa: F405\n",
" # opt = Yogi()\n",
" model_EfficientNetB7_NS.compile(optimizer=opt, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n",
"\n",
@@ -9776,7 +9776,7 @@
" model_EfficientNetB4_NS = Model(inputs=base_model.input, outputs=predictions)\n",
" print(\"Total model layers: \", len(model_EfficientNetB4_NS.layers))\n",
" # OPT/compile\n",
<
-" opt = SGD(momentum=0.92, nesterov=False)\n",
+" opt = SGD(momentum=0.92, nesterov=False) # noqa: F405\n",
" # opt = Nadam()\n",
" # opt = Adamax()\n",
" # opt = RMSprop(momentum=0.9)\n",
@@ -9817,7 +9817,7 @@
"gc.collect()\n",
"tf.keras.backend.clear_session()\n",
"# CONF/Other\n",
"LRF_OPT = SGD(momentum=0.9)\n",
"LRF_OPT = SGD(momentum=0.9) # noqa: F405\n",
"LFR_batch_size = 1 # or any other batch size that fits in your memory\n",
"LRF_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(LFR_batch_size)\n",
"# Instantiate LrFinder\n",
@@ -9875,7 +9875,7 @@
"# CEC_opt = Adagrad()\n",
"# CEC_opt = Yogi()\n",
"# CEC_opt = AdaBeliefOptimizer(epsilon=1e-7, rectify=False, weight_decay=1e-3)\n",
"CEC_opt = SGD(momentum=0.9, nesterov=False)\n",
"CEC_opt = SGD(momentum=0.9, nesterov=False) # noqa: F405\n",
"# CEC_opt = Adam()\n",
"# Main\n",
"try:\n",
@@ -9986,7 +9986,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 16,
"metadata": {
"ExecuteTime": {
"end_time": "2023-12-28T07:04:23.573633300Z",
@@ -10001,7 +10001,7 @@
"Training the model...\n",
"\u001b[0;33m\n",
"Setup Verbose:\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0;36mSetting TensorBoard Log dir to \u001b[0m\u001b[0;32m[logs/fit/y2024_m03_d19-h21_m07_s37]\u001b[0m\u001b[0;36m...\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0;36mSetting TensorBoard Log dir to \u001b[0m\u001b[0;32m[logs/fit/y2024_m03_d19-h21_m24_s12]\u001b[0m\u001b[0;36m...\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0;36mUse_extended_tensorboard \u001b[0m\u001b[0;32m[False]\u001b[0m\u001b[0;36m.\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0;36mDebug_OUTPUT_DPS \u001b[0m\u001b[0;32m[True]\u001b[0m\u001b[0;36m.\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0;36mUse_OneCycleLr \u001b[0m\u001b[0;32m[False]\u001b[0m\u001b[0;36m.\u001b[0m\n",