|
259 | 259 | " return signal * factor\n", |
260 | 260 | "\n", |
261 | 261 | "def apply_time_shift(signal, shift_limit=10):\n", |
262 | | - " \"\"\"سیگنال را به چپ یا راست شیفت میدهد تا مدل به مکان دقیق موج حساس نباشد.\"\"\"\n", |
| 262 | + " \"\"\"Shifts the signal left or right so that the model is not sensitive to the exact location of the wave.\"\"\"\n", |
263 | 263 | " shift = np.random.randint(-shift_limit, shift_limit)\n", |
264 | 264 | " return np.roll(signal, shift, axis=0)" |
265 | 265 | ] |
|
304 | 304 | " \n", |
305 | 305 | " if augment:\n", |
306 | 306 | " for i in range(batch_size):\n", |
307 | | - " # اعمال آگمنتیشن سنگین روی کلاسهای اقلیت (1, 2, 3, 4)\n", |
308 | | - " # در بخش augment ژنراتور:\n", |
|     | 307 | +     "                # Apply heavy augmentation to the minority classes (1, 2, 3, 4)\n", |
309 | 308 | " if y_batch[i] != 0:\n", |
310 | | - " if np.random.rand() > 0.3: # فقط ۷۰ درصد مواقع آگمنت کن، ۳۰ درصد نسخه اصلی را بده\n", |
|     | 309 | +     "                    if np.random.rand() > 0.3: # Augment only 70% of the time; pass the original sample through the other 30%.\n", |
311 | 310 | " X_batch[i] = apply_time_shift(apply_amplitude_scaling(add_gaussian_noise(X_batch[i])))\n", |
312 | 311 | " else:\n", |
313 | | - " # نویز بسیار سبک برای کلاس اکثریت (0)\n", |
| 312 | + " # Very light noise for the majority class (0)\n", |
314 | 313 | " if np.random.rand() > 0.7:\n", |
315 | 314 | " X_batch[i] = add_gaussian_noise(X_batch[i], noise_level=0.002)\n", |
316 | 315 | " \n", |
317 | | - " # تبدیل به One-Hot\n", |
|     | 316 | +     "        # Convert labels to one-hot encoding\n", |
318 | 317 | " y_batch_onehot = tf.keras.utils.to_categorical(y_batch, num_classes=5)\n", |
319 | 318 | " \n", |
320 | 319 | " yield X_batch, y_batch_onehot\n", |
|
348 | 347 | "def focal_loss(gamma=2.0, alpha=0.25):\n", |
349 | 348 | " def focal_loss_fixed(y_true, y_pred):\n", |
350 | 349 | " y_true = tf.cast(y_true, tf.float32)\n", |
351 | | - " # استفاده از epsilon برای جلوگیری از log(0)\n", |
| 350 | + " # Using epsilon to avoid log(0)\n", |
352 | 351 | " epsilon = K.epsilon()\n", |
353 | 352 | " y_pred = K.clip(y_pred, epsilon, 1.0 - epsilon)\n", |
354 | 353 | " \n", |
355 | | - " # محاسبه Cross Entropy به روش پایدار\n", |
|     | 354 | +     "        # Compute cross-entropy in a numerically stable way\n", |
356 | 355 | " cross_entropy = -y_true * K.log(y_pred)\n", |
357 | 356 | " \n", |
358 | | - " # وزندهی Focal\n", |
| 357 | + " # Focal weighting\n", |
359 | 358 | " loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy\n", |
360 | 359 | " \n", |
361 | | - " # جمع روی کلاسها و میانگین روی بچ\n", |
|     | 360 | +     "        # Sum over classes, then average over the batch\n", |
362 | 361 | " return tf.reduce_mean(tf.reduce_sum(loss, axis=-1))\n", |
363 | 362 | " return focal_loss_fixed" |
364 | 363 | ] |
|
620 | 619 | ], |
621 | 620 | "source": [ |
622 | 621 | "def build_rhythm_attention_model(input_shape=(187, 1), num_classes=5):\n", |
623 | | - " # تنظیم مقدار Regularization\n", |
|     | 622 | +     "    # Set the L2 regularization strength\n", |
624 | 623 | " reg = l2(0.001)\n", |
625 | 624 | " \n", |
626 | 625 | " inputs = layers.Input(shape=input_shape)\n", |
627 | 626 | "\n", |
628 | 627 | " # --- BLOCK 1: Robust CNN Feature Extractor ---\n", |
629 | | - " # لایه اول با فیلتر بزرگ برای دیدن ساختار کلی موج\n", |
| 628 | + " # First layer with large filter to see the overall structure of the wave\n", |
630 | 629 | " x = layers.Conv1D(64, kernel_size=7, padding='same', activation='relu', kernel_regularizer=reg)(inputs)\n", |
631 | 630 | " x = layers.BatchNormalization()(x)\n", |
632 | | - " x = layers.SpatialDropout1D(0.1)(x) # حذف Feature Map های نویزی\n", |
| 631 | + " x = layers.SpatialDropout1D(0.1)(x) # Removing noisy feature maps\n", |
633 | 632 | " \n", |
634 | 633 | " # Residual Block 1\n", |
635 | 634 | " shortcut = x\n", |
|
642 | 641 | " x = layers.SpatialDropout1D(0.1)(x)\n", |
643 | 642 | "\n", |
644 | 643 | " # Residual Block 2\n", |
645 | | - " # تطبیق ابعاد برای Shortcut\n", |
646 | 644 | " shortcut = layers.Conv1D(128, kernel_size=1, kernel_regularizer=reg)(x)\n", |
647 | 645 | " x = layers.Conv1D(128, kernel_size=3, padding='same', activation='relu', kernel_regularizer=reg)(x)\n", |
648 | 646 | " x = layers.BatchNormalization()(x)\n", |
649 | 647 | " x = layers.MaxPool1D(pool_size=2)(x) \n", |
650 | 648 | " shortcut = layers.MaxPool1D(pool_size=2)(shortcut)\n", |
651 | 649 | " x = layers.Add()([x, shortcut]) \n", |
652 | | - " x = layers.SpatialDropout1D(0.2)(x) # دراپاوت قویتر در لایههای عمیق\n", |
| 650 | + " x = layers.SpatialDropout1D(0.2)(x) # Stronger dropout in deep layers\n", |
653 | 651 | "\n", |
654 | 652 | " # --- BLOCK 2: Positional Encoding + Transformer ---\n", |
655 | | - " # اضافه کردن اطلاعات زمانی\n", |
|     | 653 | +     "    # Inject positional information so attention can exploit temporal order\n", |
656 | 654 | " x = PositionalEmbedding(sequence_length=x.shape[1], embed_dim=x.shape[2])(x)\n", |
657 | 655 | "\n", |
658 | | - " # Multi-Head Attention (کاهش هدها به 4 برای تعمیمپذیری بهتر)\n", |
| 656 | + " # Multi-Head Attention \n", |
659 | 657 | " attention_out = layers.MultiHeadAttention(\n", |
660 | 658 | " num_heads=4, key_dim=128, dropout=0.2\n", |
661 | 659 | " )(x, x)\n", |
|
668 | 666 | " x = layers.Concatenate()([avg_pool, max_pool])\n", |
669 | 667 | " \n", |
670 | 668 | " x = layers.Dense(128, activation='relu', kernel_regularizer=reg)(x)\n", |
671 | | - " x = layers.Dropout(0.5)(x) # افزایش Dropout به 0.5 برای مقابله با Overfitting\n", |
| 669 | + " x = layers.Dropout(0.5)(x) \n", |
672 | 670 | " x = layers.Dense(64, activation='relu', kernel_regularizer=reg)(x)\n", |
673 | 671 | " \n", |
674 | 672 | " outputs = layers.Dense(num_classes, activation='softmax')(x)\n", |
|
694 | 692 | "metadata": {}, |
695 | 693 | "outputs": [], |
696 | 694 | "source": [ |
697 | | - "# تنظیمات پیشنهادی برای کامپایل\n", |
698 | 695 | "model.compile(\n", |
699 | | - " optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), # سرعت یادگیری را کمتر کنید\n", |
700 | | - " loss=focal_loss(gamma=2.0, alpha=1.0), # آلفا ۱ چون ژنراتور وزندهی شده است\n", |
| 696 | + " optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), \n", |
| 697 | + " loss=focal_loss(gamma=2.0, alpha=1.0), \n", |
701 | 698 | " metrics=['accuracy']\n", |
702 | 699 | ")" |
703 | 700 | ] |
|
1476 | 1473 | "print(\"\\nClassification Report:\\n\")\n", |
1477 | 1474 | "print(classification_report(y_true, y_pred, target_names=['N', 'S', 'V', 'F', 'Q']))" |
1478 | 1475 | ] |
| 1476 | + }, |
| 1477 | + { |
| 1478 | + "cell_type": "markdown", |
| 1479 | + "metadata": {}, |
| 1480 | + "source": [ |
| 1481 | + "## **THE END**" |
| 1482 | + ] |
1479 | 1483 | } |
1480 | 1484 | ], |
1481 | 1485 | "metadata": { |
|
0 commit comments