Skip to content

Commit eb29b9e

Browse files
Focal Loss Implementation
1 parent f193bc5 commit eb29b9e

1 file changed

Lines changed: 38 additions & 14 deletions

File tree

RhythmAttention_Hybrid_CNN_Transformer_Architecture_for_Arrhythmia_Classification.ipynb

Lines changed: 38 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -89,25 +89,15 @@
8989
},
9090
{
9191
"cell_type": "code",
92-
"execution_count": 2,
92+
"execution_count": 7,
9393
"metadata": {},
94-
"outputs": [
95-
{
96-
"name": "stderr",
97-
"output_type": "stream",
98-
"text": [
99-
"2026-02-08 17:57:33.534489: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
100-
"2026-02-08 17:57:33.583704: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
101-
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
102-
"2026-02-08 17:57:34.748647: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n"
103-
]
104-
}
105-
],
94+
"outputs": [],
10695
"source": [
10796
"import pandas as pd\n",
10897
"import numpy as np\n",
10998
"import tensorflow as tf\n",
110-
"from sklearn.utils import class_weight"
99+
"from sklearn.utils import class_weight\n",
100+
"from tensorflow.keras import layers, models, backend as K"
111101
]
112102
},
113103
{
@@ -287,6 +277,40 @@
287277
"print(f\"Batch data shape: {x_batch.shape}\")\n",
288278
"print(f\"Batch labels shape: {y_batch.shape}\")"
289279
]
280+
},
281+
{
282+
"cell_type": "markdown",
283+
"metadata": {},
284+
"source": [
285+
"## **Focal Loss Implementation**"
286+
]
287+
},
288+
{
289+
"cell_type": "code",
290+
"execution_count": 8,
291+
"metadata": {},
292+
"outputs": [],
293+
"source": [
294+
def focal_loss(gamma=2.0, alpha=0.25):
    """Build a focal-loss function (Lin et al., 2017) for Keras training.

    Down-weights well-classified examples so optimization focuses on hard,
    misclassified ones — useful for the class imbalance in arrhythmia labels.

    Args:
        gamma: focusing exponent; larger values discount easy examples more
            aggressively. gamma=0 reduces to (alpha-scaled) cross entropy.
        alpha: class-balancing weight. Either a scalar (applied uniformly,
            the original behavior) or a per-class sequence whose length
            matches the last axis of `y_true`, broadcast across classes.

    Returns:
        A callable `loss(y_true, y_pred)` suitable for `model.compile`.
        Assumes one-hot `y_true` and probability (softmax) `y_pred` —
        required for the (1 - p_t) factor below to be correct.
    """
    # Generalization: allow a per-class weight vector in addition to the
    # original scalar. A scalar passes through unchanged (same math as before).
    if isinstance(alpha, (list, tuple)):
        alpha = K.constant(alpha)

    def focal_loss_fixed(y_true, y_pred):
        # Clip predictions away from exactly 0/1 so log() stays finite.
        eps = K.epsilon()
        y_pred = K.clip(y_pred, eps, 1.0 - eps)

        # Elementwise cross entropy: zero everywhere except the true-class
        # position, because y_true is one-hot.
        cross_entropy = -y_true * K.log(y_pred)

        # Focal modulation (1 - p_t)^gamma: since cross_entropy is non-zero
        # only at the true class, (1 - y_pred) there equals (1 - p_t), and
        # the other positions are zeroed out regardless.
        loss = alpha * K.pow(1.0 - y_pred, gamma) * cross_entropy

        # Sum over the class axis (selects the true-class term), then
        # average over the batch.
        return K.mean(K.sum(loss, axis=-1))

    return focal_loss_fixed
313+
]
290314
}
291315
],
292316
"metadata": {

0 commit comments

Comments
 (0)