Skip to content

Commit 2ec5135

Browse files
Completed Project
1 parent da2be64 commit 2ec5135

1 file changed

Lines changed: 25 additions & 21 deletions

File tree

RhythmAttention_Hybrid_CNN_Transformer_Architecture_for_Arrhythmia_Classification.ipynb

Lines changed: 25 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,7 @@
259259
" return signal * factor\n",
260260
"\n",
261261
"def apply_time_shift(signal, shift_limit=10):\n",
262-
" \"\"\"سیگنال را به چپ یا راست شیفت می‌دهد تا مدل به مکان دقیق موج حساس نباشد.\"\"\"\n",
262+
" \"\"\"Shifts the signal left or right so that the model is not sensitive to the exact location of the wave.\"\"\"\n",
263263
" shift = np.random.randint(-shift_limit, shift_limit)\n",
264264
" return np.roll(signal, shift, axis=0)"
265265
]
@@ -304,17 +304,16 @@
304304
" \n",
305305
" if augment:\n",
306306
" for i in range(batch_size):\n",
307-
" # اعمال آگمنتیشن سنگین روی کلاس‌های اقلیت (1, 2, 3, 4)\n",
308-
" # در بخش augment ژنراتور:\n",
307+
" # Apply heavy augmentation to minority classes (1, 2, 3, 4) \n",
309308
" if y_batch[i] != 0:\n",
310-
" if np.random.rand() > 0.3: # فقط ۷۰ درصد مواقع آگمنت کن، ۳۰ درصد نسخه اصلی را بده\n",
309+
" if np.random.rand() > 0.3: # Only augment 70% of the time, give the original 30% of the time.\n",
311310
" X_batch[i] = apply_time_shift(apply_amplitude_scaling(add_gaussian_noise(X_batch[i])))\n",
312311
" else:\n",
313-
" # نویز بسیار سبک برای کلاس اکثریت (0)\n",
312+
" # Very light noise for the majority class (0)\n",
314313
" if np.random.rand() > 0.7:\n",
315314
" X_batch[i] = add_gaussian_noise(X_batch[i], noise_level=0.002)\n",
316315
" \n",
317-
" # تبدیل به One-Hot\n",
316+
"    # Convert to One-Hot\n",
318317
" y_batch_onehot = tf.keras.utils.to_categorical(y_batch, num_classes=5)\n",
319318
" \n",
320319
" yield X_batch, y_batch_onehot\n",
@@ -348,17 +347,17 @@
348347
"def focal_loss(gamma=2.0, alpha=0.25):\n",
349348
" def focal_loss_fixed(y_true, y_pred):\n",
350349
" y_true = tf.cast(y_true, tf.float32)\n",
351-
" # استفاده از epsilon برای جلوگیری از log(0)\n",
350+
" # Using epsilon to avoid log(0)\n",
352351
" epsilon = K.epsilon()\n",
353352
" y_pred = K.clip(y_pred, epsilon, 1.0 - epsilon)\n",
354353
" \n",
355-
" # محاسبه Cross Entropy به روش پایدار\n",
354+
" # Calculating Cross Entropy in a Stable Way\n",
356355
" cross_entropy = -y_true * K.log(y_pred)\n",
357356
" \n",
358-
" # وزن‌دهی Focal\n",
357+
" # Focal weighting\n",
359358
" loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy\n",
360359
" \n",
361-
" # جمع روی کلاس‌ها و میانگین روی بچ\n",
360+
"        # Sum over classes and average over batch\n",
362361
" return tf.reduce_mean(tf.reduce_sum(loss, axis=-1))\n",
363362
" return focal_loss_fixed"
364363
]
@@ -620,16 +619,16 @@
620619
],
621620
"source": [
622621
"def build_rhythm_attention_model(input_shape=(187, 1), num_classes=5):\n",
623-
" # تنظیم مقدار Regularization\n",
622+
" # Setting the Regularization value\n",
624623
" reg = l2(0.001)\n",
625624
" \n",
626625
" inputs = layers.Input(shape=input_shape)\n",
627626
"\n",
628627
" # --- BLOCK 1: Robust CNN Feature Extractor ---\n",
629-
" # لایه اول با فیلتر بزرگ برای دیدن ساختار کلی موج\n",
628+
" # First layer with large filter to see the overall structure of the wave\n",
630629
" x = layers.Conv1D(64, kernel_size=7, padding='same', activation='relu', kernel_regularizer=reg)(inputs)\n",
631630
" x = layers.BatchNormalization()(x)\n",
632-
" x = layers.SpatialDropout1D(0.1)(x) # حذف Feature Map های نویزی\n",
631+
" x = layers.SpatialDropout1D(0.1)(x) # Removing noisy feature maps\n",
633632
" \n",
634633
" # Residual Block 1\n",
635634
" shortcut = x\n",
@@ -642,20 +641,19 @@
642641
" x = layers.SpatialDropout1D(0.1)(x)\n",
643642
"\n",
644643
" # Residual Block 2\n",
645-
" # تطبیق ابعاد برای Shortcut\n",
646644
" shortcut = layers.Conv1D(128, kernel_size=1, kernel_regularizer=reg)(x)\n",
647645
" x = layers.Conv1D(128, kernel_size=3, padding='same', activation='relu', kernel_regularizer=reg)(x)\n",
648646
" x = layers.BatchNormalization()(x)\n",
649647
" x = layers.MaxPool1D(pool_size=2)(x) \n",
650648
" shortcut = layers.MaxPool1D(pool_size=2)(shortcut)\n",
651649
" x = layers.Add()([x, shortcut]) \n",
652-
" x = layers.SpatialDropout1D(0.2)(x) # دراپ‌اوت قوی‌تر در لایه‌های عمیق\n",
650+
" x = layers.SpatialDropout1D(0.2)(x) # Stronger dropout in deep layers\n",
653651
"\n",
654652
" # --- BLOCK 2: Positional Encoding + Transformer ---\n",
655-
" # اضافه کردن اطلاعات زمانی\n",
653+
" # Add time information\n",
656654
" x = PositionalEmbedding(sequence_length=x.shape[1], embed_dim=x.shape[2])(x)\n",
657655
"\n",
658-
" # Multi-Head Attention (کاهش هدها به 4 برای تعمیم‌پذیری بهتر)\n",
656+
" # Multi-Head Attention \n",
659657
" attention_out = layers.MultiHeadAttention(\n",
660658
" num_heads=4, key_dim=128, dropout=0.2\n",
661659
" )(x, x)\n",
@@ -668,7 +666,7 @@
668666
" x = layers.Concatenate()([avg_pool, max_pool])\n",
669667
" \n",
670668
" x = layers.Dense(128, activation='relu', kernel_regularizer=reg)(x)\n",
671-
" x = layers.Dropout(0.5)(x) # افزایش Dropout به 0.5 برای مقابله با Overfitting\n",
669+
" x = layers.Dropout(0.5)(x) \n",
672670
" x = layers.Dense(64, activation='relu', kernel_regularizer=reg)(x)\n",
673671
" \n",
674672
" outputs = layers.Dense(num_classes, activation='softmax')(x)\n",
@@ -694,10 +692,9 @@
694692
"metadata": {},
695693
"outputs": [],
696694
"source": [
697-
"# تنظیمات پیشنهادی برای کامپایل\n",
698695
"model.compile(\n",
699-
" optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), # سرعت یادگیری را کمتر کنید\n",
700-
" loss=focal_loss(gamma=2.0, alpha=1.0), # آلفا ۱ چون ژنراتور وزن‌دهی شده است\n",
696+
" optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), \n",
697+
" loss=focal_loss(gamma=2.0, alpha=1.0), \n",
701698
" metrics=['accuracy']\n",
702699
")"
703700
]
@@ -1476,6 +1473,13 @@
14761473
"print(\"\\nClassification Report:\\n\")\n",
14771474
"print(classification_report(y_true, y_pred, target_names=['N', 'S', 'V', 'F', 'Q']))"
14781475
]
1476+
},
1477+
{
1478+
"cell_type": "markdown",
1479+
"metadata": {},
1480+
"source": [
1481+
"## **THE END**"
1482+
]
14791483
}
14801484
],
14811485
"metadata": {

0 commit comments

Comments
 (0)