@@ -1050,6 +1050,9 @@ def test_calculate_histogram_rates(l1b_histrates):
10501050 exposure_factors_60deg = np .zeros ((2 , 7 , 6 ))
10511051 exposure_factors_6deg [0 , 0 , 0 ] = 1
10521052 exposure_factors_60deg [0 , 0 , 0 ] = 1
1053+ exposure_factors_6deg [0 , 1 , 0 ] = 0
1054+ exposure_factors_60deg [0 , 1 , 0 ] = 0
1055+
10531056 exposure_factors = {}
10541057 exposure_factors ["6deg" ] = exposure_factors_6deg
10551058 exposure_factors ["60deg" ] = exposure_factors_60deg
@@ -1615,7 +1618,7 @@ def test_filters_by_count_and_time_window(self, mock_repoint):
16151618 },
16161619 coords = {"epoch" : [0 , 1 , 2 , 3 , 4 ]},
16171620 )
1618- # Time window: [5s, 25s] - should include epochs 1 and 2
1621+ # # Time window: [5s, 25s] - should include epochs 1 and 2
16191622 expected_mask = np .array ([False , True , True , False , False ])
16201623
16211624 # Act
@@ -1722,10 +1725,11 @@ def test_profile_for_group_end_bins_excluded(self):
17221725 assert count_per_bin [719 ] == 0
17231726 # All other bins should have count=2
17241727 assert np .all (count_per_bin [:718 ] == 2 )
1725- # End bins should have FILLVAL
1726- assert np .all (np .isnan (avg_amplitude [718 :]))
1727- # Middle bins should have average value
1728+ # Averages should be 100 for all bins except the excluded ones
17281729 assert np .all (avg_amplitude [:718 ] == 100.0 )
1730+ # Excluded bins should be NaN
1731+ assert np .isnan (avg_amplitude [718 ])
1732+ assert np .isnan (avg_amplitude [719 ])
17291733
17301734 def test_profile_for_group_empty_data (self ):
17311735 """Test handling of empty data array."""
@@ -1751,6 +1755,7 @@ def test_profiles_by_group_creates_correct_groups(self, mock_repoint):
17511755 mock_repoint .return_value = pd .DataFrame (
17521756 {"repoint_in_progress" : [False ] * n_records }
17531757 )
1758+ met_times = np .arange (n_records , dtype = np .float64 ) * 15.0
17541759 l1a_star = xr .Dataset (
17551760 {
17561761 "count" : ("epoch" , [720 ] * n_records ),
@@ -1764,7 +1769,7 @@ def test_profiles_by_group_creates_correct_groups(self, mock_repoint):
17641769 ),
17651770 },
17661771 coords = {
1767- "epoch" : met_to_ttj2000ns (np . arange ( n_records ) * 15.0 ),
1772+ "epoch" : met_to_ttj2000ns (met_times ),
17681773 "samples" : np .arange (720 ),
17691774 },
17701775 )
@@ -1887,7 +1892,10 @@ def test_initializes_with_spin_data(
18871892 l1a_star = xr .Dataset (
18881893 {
18891894 "count" : ("epoch" , [720 ] * n_records ),
1890- "shcoarse" : ("epoch" , met_times ),
1895+ "shcoarse" : (
1896+ "epoch" ,
1897+ np .arange (n_records , dtype = np .float64 ) * 15.0 ,
1898+ ),
18911899 "data" : (
18921900 ("epoch" , "samples" ),
18931901 np .random .randint (100 , 200 , size = (n_records , 720 ), dtype = np .uint16 ),
@@ -2111,7 +2119,10 @@ def test_multiple_groups_created(
21112119 l1a_star = xr .Dataset (
21122120 {
21132121 "count" : ("epoch" , [720 ] * n_records ),
2114- "shcoarse" : ("epoch" , met_times ),
2122+ "shcoarse" : (
2123+ "epoch" ,
2124+ np .arange (n_records , dtype = np .float64 ) * 15.0 ,
2125+ ),
21152126 "data" : (
21162127 ("epoch" , "samples" ),
21172128 np .ones ((n_records , 720 ), dtype = np .uint16 ) * 100 ,
@@ -2582,26 +2593,34 @@ def test_split_backgrounds_and_goodtimes_dataset(attr_mgr_l1b):
25822593 assert "h_background_variance" in bgrates_ds .data_vars
25832594 assert "o_background_rates" in bgrates_ds .data_vars
25842595 assert "o_background_variance" in bgrates_ds .data_vars
2585- assert "start_met" in bgrates_ds .data_vars # Included in both datasets
2586- assert "end_met" in bgrates_ds .data_vars # Included in both datasets
2587- assert "bin_start" in bgrates_ds .data_vars # Included in both datasets
2588- assert "bin_end" in bgrates_ds .data_vars # Included in both datasets
2589- assert "esa_goodtime_flags" not in bgrates_ds .data_vars # Only in goodtimes
2596+ # Note: bgrates uses 'met' dimension, goodtimes has epoch in data vars
25902597
2591- # Assert - Check goodtimes dataset has goodtime fields
2598+ # Check goodtimes dataset structure
25922599 assert "gt_start_met" in goodtimes_ds .data_vars
25932600 assert "gt_end_met" in goodtimes_ds .data_vars
25942601 assert "bin_start" in goodtimes_ds .data_vars
25952602 assert "bin_end" in goodtimes_ds .data_vars
25962603 assert "esa_goodtime_flags" in goodtimes_ds .data_vars
2597- assert "h_background_rates" not in goodtimes_ds .data_vars # Only in bgrates
2598- assert "h_background_variance" not in goodtimes_ds .data_vars # Only in bgrates
2599- assert "o_background_rates" not in goodtimes_ds .data_vars # Only in bgrates
2600- assert "o_background_variance" not in goodtimes_ds .data_vars # Only in bgrates
26012604
2602- # Assert - Check global attributes were set
2603- assert "Logical_source" in bgrates_ds .attrs
2604- assert "Logical_source" in goodtimes_ds .attrs
2605+ # Check dimensions
2606+ assert bgrates_ds ["h_background_rates" ].dims == ("met" , "esa_step" )
2607+ assert bgrates_ds ["h_background_rates" ].shape [1 ] == 7 # 7 ESA steps
2608+
2609+ # Check that goodtime intervals were created
2610+ assert len (goodtimes_ds ["gt_start_met" ]) > 0
2611+ assert len (goodtimes_ds ["gt_end_met" ]) > 0
2612+
2613+ # Check that start times are before end times
2614+ assert np .all (
2615+ goodtimes_ds ["gt_start_met" ].values <= goodtimes_ds ["gt_end_met" ].values
2616+ )
2617+
2618+ # Check bin_start and bin_end values
2619+ assert np .all (goodtimes_ds ["bin_start" ].values == 0 )
2620+ assert np .all (goodtimes_ds ["bin_end" ].values == 59 )
2621+
2622+ # Check ESA goodtime flags are all 1 (good)
2623+ assert np .all (goodtimes_ds ["esa_goodtime_flags" ].values == 1 )
26052624
26062625
26072626def test_l1b_bgrates_and_goodtimes_azimuth_bins (attr_mgr_l1b ):
@@ -2870,3 +2889,101 @@ def test_l1b_bgrates_and_goodtimes_rate_transition_high_to_low_to_high(attr_mgr_
28702889 # Background rates should be positive for all intervals
28712890 assert np .all (l1b_bgrates_ds ["h_background_rates" ].values > 0 )
28722891 assert np .all (l1b_bgrates_ds ["o_background_rates" ].values > 0 )
2892+
2893+
def test_l1b_bgrates_and_goodtimes_large_interval_with_active_tracking(attr_mgr_l1b):
    """
    Verify that an actively-tracked goodtime interval is closed when an
    oversized epoch interval (data gap) is encountered.

    Exercises the code path where:
    1. an interval is already being tracked (begin > 0.0),
    2. a chunk whose span exceeds (interval_nom + delay_max) arrives, and
    3. the open interval is closed before the gap is skipped.
    """
    # Arrange
    cycle_count = 10
    delay_max = 840
    met_spacing = 42

    # Phase 1: enough low-rate chunks that tracking starts (begin > 0.0).
    epochs_per_chunk = 10
    n_pre_gap = 2 * epochs_per_chunk  # 2 chunks of 10 epochs each = 20 epochs

    met_start = 473389200
    pre_gap_mets = np.arange(
        met_start, met_start + n_pre_gap * met_spacing, met_spacing
    )

    # Phase 2: a chunk whose first-to-last span is too large.
    # The span is measured from the chunk's first epoch to its last; it must
    # exceed (interval_nom + delay_max) = 4200 + 840 = 5040.
    large_gap = 6000  # larger than the threshold
    gap_chunk_start = pre_gap_mets[-1] + met_spacing
    gap_chunk_mets = np.arange(
        gap_chunk_start,
        gap_chunk_start + epochs_per_chunk * met_spacing,
        met_spacing,
    )
    # Stretch the chunk's final epoch so its span exceeds the threshold.
    gap_chunk_mets[-1] = gap_chunk_mets[0] + large_gap

    # Phase 3: normal-cadence data after the gap.
    post_gap_start = gap_chunk_mets[-1] + met_spacing
    post_gap_mets = np.arange(
        post_gap_start, post_gap_start + 200 * met_spacing, met_spacing
    )

    met_times = np.concatenate([pre_gap_mets, gap_chunk_mets, post_gap_mets])
    epoch_times = met_to_ttj2000ns(met_times)

    # Keep every count below h_bg_rate_nom = 0.0028 so tracking begins.
    n_epochs = len(met_times)
    h_counts = np.full((n_epochs, 7, 60), 0.00025)
    o_counts = np.full((n_epochs, 7, 60), 0.000025)

    l1b_histrates = xr.Dataset(
        data_vars={
            "h_counts": (("epoch", "esa_step", "spin_bin_6"), h_counts),
            "o_counts": (("epoch", "esa_step", "spin_bin_6"), o_counts),
        },
        coords={
            "epoch": epoch_times,
            "esa_step": np.arange(1, 8),
            "spin_bin_6": np.arange(60),
        },
    )

    sci_dependencies = {"imap_lo_l1b_histrates": l1b_histrates}

    # Act
    l1b_bgrates_ds, l1b_goodtimes_ds = l1b_bgrates_and_goodtimes(
        sci_dependencies, attr_mgr_l1b, cycle_count=cycle_count, delay_max=delay_max
    )

    # Assert - at least two intervals were created:
    # the one closed just before the gap, and the one started after it.
    assert len(l1b_goodtimes_ds["gt_start_met"]) >= 2

    # The first interval must end before the gap chunk begins (allowing for
    # the +320 offset the implementation applies to interval end times).
    first_interval_end = l1b_goodtimes_ds["gt_end_met"].values[0]
    assert first_interval_end < gap_chunk_mets[0] + 320

    # Background rates remain valid (positive) throughout.
    assert np.all(l1b_bgrates_ds["h_background_rates"].values > 0)
    assert np.all(l1b_bgrates_ds["o_background_rates"].values > 0)
0 commit comments