Skip to content

Commit ae73a5f

Browse files
committed
Merge branch 'lo-goodtimes' of https://github.com/sdhoyt/imap_processing into Lo-L1c-GT
2 parents 7b15922 + 882be30 commit ae73a5f

2 files changed

Lines changed: 169 additions & 48 deletions

File tree

imap_processing/lo/l1b/lo_l1b.py

Lines changed: 32 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -191,6 +191,7 @@
191191

192192
# -------------------------------------------------------------------
193193
DE_CLOCK_TICK_S = 4.096e-3 # seconds per DE clock tick
194+
NUM_ESA_STEPS = 7
194195

195196

196197
def lo_l1b(
@@ -2545,30 +2546,33 @@ def l1b_bgrates_and_goodtimes(
25452546
interval_nom = 420 * cycle_count # seconds
25462547
exposure = interval_nom * 0.5 # 50% duty cycle
25472548

2548-
h_intensity = np.sum(l1b_histrates["h_counts"][:, 0:7, 20:50], axis=(1, 2))
2549-
o_intensity = np.sum(l1b_histrates["o_counts"][:, 0:7, 20:50], axis=(1, 2))
2550-
epochs_ttj2000 = l1b_histrates["epoch"][:]
2549+
h_intensity = np.sum(
2550+
l1b_histrates["h_counts"][:, 0:NUM_ESA_STEPS, 20:50], axis=(1, 2)
2551+
)
2552+
o_intensity = np.sum(
2553+
l1b_histrates["o_counts"][:, 0:NUM_ESA_STEPS, 20:50], axis=(1, 2)
2554+
)
25512555

25522556
# Use proper SPICE-based time conversion with current kernels
25532557
# Note: The reference script adds +9 seconds because they use an
25542558
# "older time kernel (pre 2012)"
25552559
# We use current SPICE kernels, so we should NOT add that offset
2556-
shcoarse = ttj2000ns_to_met(epochs_ttj2000)
2557-
# Convert to plain numpy array for easier indexing
2558-
if hasattr(shcoarse, "values"):
2559-
shcoarse = shcoarse.values
2560-
shcoarse = np.asarray(shcoarse, dtype=np.float64)
2560+
met = ttj2000ns_to_met(l1b_histrates["epoch"].values)
25612561

25622562
max_row_count = np.shape(h_intensity)[0]
25632563
bg_start_met = xr.DataArray([0.0])
25642564
bg_end_met = xr.DataArray([0.0])
25652565
epochs = l1b_histrates["epoch"].values
25662566
epochs = xr.DataArray(epochs, dims=["epoch"])
25672567
goodtimes = xr.DataArray(np.zeros((max_row_count, 2), dtype=np.int64))
2568-
h_background_rate = xr.DataArray(np.zeros((1, 7), dtype=np.float32))
2569-
h_background_rate_variance = xr.DataArray(np.zeros((1, 7), dtype=np.float32))
2570-
o_background_rate = xr.DataArray(np.zeros((1, 7), dtype=np.float32))
2571-
o_background_rate_variance = xr.DataArray(np.zeros((1, 7), dtype=np.float32))
2568+
h_background_rate = xr.DataArray(np.zeros((1, NUM_ESA_STEPS), dtype=np.float32))
2569+
h_background_rate_variance = xr.DataArray(
2570+
np.zeros((1, NUM_ESA_STEPS), dtype=np.float32)
2571+
)
2572+
o_background_rate = xr.DataArray(np.zeros((1, NUM_ESA_STEPS), dtype=np.float32))
2573+
o_background_rate_variance = xr.DataArray(
2574+
np.zeros((1, NUM_ESA_STEPS), dtype=np.float32)
2575+
)
25722576

25732577
# Walk through the histrate data in chunks of cycle_count (10)
25742578
# and identify goodtime intervals and calculate background rates
@@ -2586,13 +2590,13 @@ def l1b_bgrates_and_goodtimes(
25862590
for index in range(0, max_row_count, cycle_count):
25872591
# Calculate the interval for this chunk
25882592
if (index + cycle_count - 1) < max_row_count:
2589-
interval = shcoarse[index + cycle_count - 1] - shcoarse[index]
2593+
interval = met[index + cycle_count - 1] - met[index]
25902594
else:
25912595
interval = interval_nom * max_row_count
25922596

25932597
logger.debug(
2594-
f"\n Index {index}: shcoarse[{index}]="
2595-
f"{shcoarse[index] if index < max_row_count else 'N/A'}, "
2598+
f"\n Index {index}: met[{index}]="
2599+
f"{met[index] if index < max_row_count else 'N/A'}, "
25962600
f"interval={interval}, begin={begin}"
25972601
)
25982602

@@ -2604,7 +2608,7 @@ def l1b_bgrates_and_goodtimes(
26042608
)
26052609
# If we were tracking a goodtime interval, close it before the gap
26062610
if begin > 0.0:
2607-
end = shcoarse[index - 1]
2611+
end = met[index - 1]
26082612
logger.debug(f" Closing interval before gap: {begin} -> {end}")
26092613

26102614
epochs[row_count] = l1b_histrates["epoch"][index - 1].values.item()
@@ -2624,14 +2628,14 @@ def l1b_bgrates_and_goodtimes(
26242628
# Check for time gap from previous chunk
26252629
delta_time = 0.0
26262630
if index > 0:
2627-
delta_time = shcoarse[index] - (shcoarse[index - 1] + 420)
2631+
delta_time = met[index] - (met[index - 1] + 420)
26282632
logger.debug(
26292633
f" Delta time from previous: {delta_time} (max: {delay_max})"
26302634
)
26312635

26322636
# If there's a gap and we have an active interval, close it
26332637
if (delta_time > delay_max) & (begin > 0.0):
2634-
end = shcoarse[index - 1]
2638+
end = met[index - 1]
26352639
logger.debug(f" Closing interval due to time gap: {begin} -> {end}")
26362640

26372641
epochs[row_count] = l1b_histrates["epoch"][index - 1].values.item()
@@ -2658,7 +2662,7 @@ def l1b_bgrates_and_goodtimes(
26582662
# If rate is below threshold, accumulate for background
26592663
if antiram_h_rate < h_bg_rate_nom:
26602664
if begin == 0.0:
2661-
begin = shcoarse[index]
2665+
begin = met[index]
26622666
logger.debug(f" Starting new interval at {begin}")
26632667

26642668
sum_h_bg_counts = sum_h_bg_counts + antiram_h_counts
@@ -2668,7 +2672,7 @@ def l1b_bgrates_and_goodtimes(
26682672
# If rate exceeds threshold, close the interval if one is active
26692673
if antiram_h_rate >= h_bg_rate_nom:
26702674
if begin > 0.0:
2671-
end = shcoarse[index - 1]
2675+
end = met[index - 1]
26722676
logger.debug(
26732677
f" Closing interval due to rate threshold: {begin} -> {end}"
26742678
)
@@ -2686,7 +2690,7 @@ def l1b_bgrates_and_goodtimes(
26862690

26872691
# Handle the final interval if one is still open
26882692
if (end == 0.0) & (begin > 0.0):
2689-
end = shcoarse[max_row_count - 1]
2693+
end = met[max_row_count - 1]
26902694
if end > begin:
26912695
epochs[row_count] = l1b_histrates["epoch"][max_row_count - 1]
26922696
goodtimes[row_count, :] = [int(begin - 620), int(end + 320)]
@@ -2720,12 +2724,12 @@ def l1b_bgrates_and_goodtimes(
27202724
o_bg_rate = o_bg_rate_nom * 0.3
27212725
o_bg_rate_variance = o_bg_rate
27222726

2723-
h_background_rate[0, :] = np.full(7, h_bg_rate)
2724-
h_background_rate_variance[0, :] = np.full(7, h_bg_rate_variance)
2725-
o_background_rate[0, :] = np.full(7, o_bg_rate)
2726-
o_background_rate_variance[0, :] = np.full(7, o_bg_rate_variance)
2727-
bg_start_met[0] = shcoarse[0]
2728-
bg_end_met[0] = shcoarse[max_row_count - 1]
2727+
h_background_rate[0, :] = np.full(NUM_ESA_STEPS, h_bg_rate)
2728+
h_background_rate_variance[0, :] = np.full(NUM_ESA_STEPS, h_bg_rate_variance)
2729+
o_background_rate[0, :] = np.full(NUM_ESA_STEPS, o_bg_rate)
2730+
o_background_rate_variance[0, :] = np.full(NUM_ESA_STEPS, o_bg_rate_variance)
2731+
bg_start_met[0] = met[0]
2732+
bg_end_met[0] = met[max_row_count - 1]
27292733

27302734
# Handle case where no goodtimes were found -- produce a
27312735
# single record with invalid times (the defaults above)
@@ -2808,7 +2812,7 @@ def l1b_bgrates_and_goodtimes(
28082812
# For now, set all ESA flags to 1 (good) since we don't have
28092813
# an algorithm for this yet
28102814
l1b_backgrounds_and_goodtimes_ds["esa_goodtime_flags"] = xr.DataArray(
2811-
data=np.zeros((row_count, 7), dtype=int) + 1,
2815+
data=np.zeros((row_count, NUM_ESA_STEPS), dtype=int) + 1,
28122816
name="E-step",
28132817
dims=["epoch", "esa_step"],
28142818
# attrs=attr_mgr_l1b.get_variable_attributes("esa_goodtime_flags"),

imap_processing/tests/lo/test_lo_l1b.py

Lines changed: 137 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1050,6 +1050,9 @@ def test_calculate_histogram_rates(l1b_histrates):
10501050
exposure_factors_60deg = np.zeros((2, 7, 6))
10511051
exposure_factors_6deg[0, 0, 0] = 1
10521052
exposure_factors_60deg[0, 0, 0] = 1
1053+
exposure_factors_6deg[0, 1, 0] = 0
1054+
exposure_factors_60deg[0, 1, 0] = 0
1055+
10531056
exposure_factors = {}
10541057
exposure_factors["6deg"] = exposure_factors_6deg
10551058
exposure_factors["60deg"] = exposure_factors_60deg
@@ -1615,7 +1618,7 @@ def test_filters_by_count_and_time_window(self, mock_repoint):
16151618
},
16161619
coords={"epoch": [0, 1, 2, 3, 4]},
16171620
)
1618-
# Time window: [5s, 25s] - should include epochs 1 and 2
1621+
# Time window: [5s, 25s] - should include epochs 1 and 2
16191622
expected_mask = np.array([False, True, True, False, False])
16201623

16211624
# Act
@@ -1722,10 +1725,11 @@ def test_profile_for_group_end_bins_excluded(self):
17221725
assert count_per_bin[719] == 0
17231726
# All other bins should have count=2
17241727
assert np.all(count_per_bin[:718] == 2)
1725-
# End bins should have FILLVAL
1726-
assert np.all(np.isnan(avg_amplitude[718:]))
1727-
# Middle bins should have average value
1728+
# Averages should be 100 for all bins except the excluded ones
17281729
assert np.all(avg_amplitude[:718] == 100.0)
1730+
# Excluded bins should be NaN
1731+
assert np.isnan(avg_amplitude[718])
1732+
assert np.isnan(avg_amplitude[719])
17291733

17301734
def test_profile_for_group_empty_data(self):
17311735
"""Test handling of empty data array."""
@@ -1751,6 +1755,7 @@ def test_profiles_by_group_creates_correct_groups(self, mock_repoint):
17511755
mock_repoint.return_value = pd.DataFrame(
17521756
{"repoint_in_progress": [False] * n_records}
17531757
)
1758+
met_times = np.arange(n_records, dtype=np.float64) * 15.0
17541759
l1a_star = xr.Dataset(
17551760
{
17561761
"count": ("epoch", [720] * n_records),
@@ -1764,7 +1769,7 @@ def test_profiles_by_group_creates_correct_groups(self, mock_repoint):
17641769
),
17651770
},
17661771
coords={
1767-
"epoch": met_to_ttj2000ns(np.arange(n_records) * 15.0),
1772+
"epoch": met_to_ttj2000ns(met_times),
17681773
"samples": np.arange(720),
17691774
},
17701775
)
@@ -1887,7 +1892,10 @@ def test_initializes_with_spin_data(
18871892
l1a_star = xr.Dataset(
18881893
{
18891894
"count": ("epoch", [720] * n_records),
1890-
"shcoarse": ("epoch", met_times),
1895+
"shcoarse": (
1896+
"epoch",
1897+
np.arange(n_records, dtype=np.float64) * 15.0,
1898+
),
18911899
"data": (
18921900
("epoch", "samples"),
18931901
np.random.randint(100, 200, size=(n_records, 720), dtype=np.uint16),
@@ -2111,7 +2119,10 @@ def test_multiple_groups_created(
21112119
l1a_star = xr.Dataset(
21122120
{
21132121
"count": ("epoch", [720] * n_records),
2114-
"shcoarse": ("epoch", met_times),
2122+
"shcoarse": (
2123+
"epoch",
2124+
np.arange(n_records, dtype=np.float64) * 15.0,
2125+
),
21152126
"data": (
21162127
("epoch", "samples"),
21172128
np.ones((n_records, 720), dtype=np.uint16) * 100,
@@ -2582,26 +2593,34 @@ def test_split_backgrounds_and_goodtimes_dataset(attr_mgr_l1b):
25822593
assert "h_background_variance" in bgrates_ds.data_vars
25832594
assert "o_background_rates" in bgrates_ds.data_vars
25842595
assert "o_background_variance" in bgrates_ds.data_vars
2585-
assert "start_met" in bgrates_ds.data_vars # Included in both datasets
2586-
assert "end_met" in bgrates_ds.data_vars # Included in both datasets
2587-
assert "bin_start" in bgrates_ds.data_vars # Included in both datasets
2588-
assert "bin_end" in bgrates_ds.data_vars # Included in both datasets
2589-
assert "esa_goodtime_flags" not in bgrates_ds.data_vars # Only in goodtimes
2596+
# Note: bgrates uses 'met' dimension, goodtimes has epoch in data vars
25902597

2591-
# Assert - Check goodtimes dataset has goodtime fields
2598+
# Check goodtimes dataset structure
25922599
assert "gt_start_met" in goodtimes_ds.data_vars
25932600
assert "gt_end_met" in goodtimes_ds.data_vars
25942601
assert "bin_start" in goodtimes_ds.data_vars
25952602
assert "bin_end" in goodtimes_ds.data_vars
25962603
assert "esa_goodtime_flags" in goodtimes_ds.data_vars
2597-
assert "h_background_rates" not in goodtimes_ds.data_vars # Only in bgrates
2598-
assert "h_background_variance" not in goodtimes_ds.data_vars # Only in bgrates
2599-
assert "o_background_rates" not in goodtimes_ds.data_vars # Only in bgrates
2600-
assert "o_background_variance" not in goodtimes_ds.data_vars # Only in bgrates
26012604

2602-
# Assert - Check global attributes were set
2603-
assert "Logical_source" in bgrates_ds.attrs
2604-
assert "Logical_source" in goodtimes_ds.attrs
2605+
# Check dimensions
2606+
assert bgrates_ds["h_background_rates"].dims == ("met", "esa_step")
2607+
assert bgrates_ds["h_background_rates"].shape[1] == 7 # 7 ESA steps
2608+
2609+
# Check that goodtime intervals were created
2610+
assert len(goodtimes_ds["gt_start_met"]) > 0
2611+
assert len(goodtimes_ds["gt_end_met"]) > 0
2612+
2613+
# Check that start times are before end times
2614+
assert np.all(
2615+
goodtimes_ds["gt_start_met"].values <= goodtimes_ds["gt_end_met"].values
2616+
)
2617+
2618+
# Check bin_start and bin_end values
2619+
assert np.all(goodtimes_ds["bin_start"].values == 0)
2620+
assert np.all(goodtimes_ds["bin_end"].values == 59)
2621+
2622+
# Check ESA goodtime flags are all 1 (good)
2623+
assert np.all(goodtimes_ds["esa_goodtime_flags"].values == 1)
26052624

26062625

26072626
def test_l1b_bgrates_and_goodtimes_azimuth_bins(attr_mgr_l1b):
@@ -2870,3 +2889,101 @@ def test_l1b_bgrates_and_goodtimes_rate_transition_high_to_low_to_high(attr_mgr_
28702889
# Background rates should be positive for all intervals
28712890
assert np.all(l1b_bgrates_ds["h_background_rates"].values > 0)
28722891
assert np.all(l1b_bgrates_ds["o_background_rates"].values > 0)
2892+
2893+
2894+
def test_l1b_bgrates_and_goodtimes_large_interval_with_active_tracking(attr_mgr_l1b):
2895+
"""
2896+
Test that an active goodtime interval is properly closed when a large interval
2897+
gap is encountered.
2898+
2899+
This test ensures the code path where:
2900+
1. We're actively tracking an interval (begin > 0.0)
2901+
2. A chunk with interval > (interval_nom + delay_max) is encountered
2902+
3. The active interval is closed before skipping the gap
2903+
"""
2904+
# Arrange - Create dataset where we start tracking, then hit a large interval
2905+
cycle_count = 10
2906+
delay_max = 840
2907+
met_spacing = 42
2908+
2909+
# First: Create enough low-rate chunks to start tracking (begin > 0.0)
2910+
num_chunks_before_gap = 2 # 2 chunks of 10 epochs each = 20 epochs
2911+
epochs_per_chunk = 10
2912+
num_epochs_first = num_chunks_before_gap * epochs_per_chunk
2913+
2914+
met_start = 473389200
2915+
met_times_first = np.arange(
2916+
met_start, met_start + num_epochs_first * met_spacing, met_spacing
2917+
)
2918+
2919+
# Second: Create a chunk where the interval is too large
2920+
# The interval is measured from the first epoch of the chunk to the last
2921+
# We need interval > (interval_nom + delay_max) = 4200 + 840 = 5040
2922+
large_gap = 6000 # Larger than threshold
2923+
met_times_gap_chunk_start = met_times_first[-1] + met_spacing
2924+
2925+
# Create the problematic chunk (10 more epochs)
2926+
met_times_gap_chunk = np.arange(
2927+
met_times_gap_chunk_start,
2928+
met_times_gap_chunk_start + epochs_per_chunk * met_spacing,
2929+
met_spacing,
2930+
)
2931+
2932+
met_times_gap_chunk_adjusted = met_times_gap_chunk.copy()
2933+
met_times_gap_chunk_adjusted[-1] = met_times_gap_chunk[0] + large_gap
2934+
2935+
# Third: Add more normal data after the gap
2936+
met_times_after = np.arange(
2937+
met_times_gap_chunk_adjusted[-1] + met_spacing,
2938+
met_times_gap_chunk_adjusted[-1] + met_spacing + 200 * met_spacing,
2939+
met_spacing,
2940+
)
2941+
2942+
met_times = np.concatenate(
2943+
[met_times_first, met_times_gap_chunk_adjusted, met_times_after]
2944+
)
2945+
epoch_times = met_to_ttj2000ns(met_times)
2946+
2947+
# All counts are low (below h_bg_rate_nom = 0.0028) to ensure we start tracking
2948+
h_counts = np.ones((len(met_times), 7, 60)) * 0.00025
2949+
o_counts = np.ones((len(met_times), 7, 60)) * 0.000025
2950+
2951+
l1b_histrates = xr.Dataset(
2952+
{
2953+
"h_counts": (("epoch", "esa_step", "spin_bin_6"), h_counts),
2954+
"o_counts": (("epoch", "esa_step", "spin_bin_6"), o_counts),
2955+
},
2956+
coords={
2957+
"epoch": epoch_times,
2958+
"esa_step": np.arange(1, 8),
2959+
"spin_bin_6": np.arange(60),
2960+
},
2961+
)
2962+
2963+
sci_dependencies = {"imap_lo_l1b_histrates": l1b_histrates}
2964+
2965+
# Act
2966+
result = l1b_bgrates_and_goodtimes(
2967+
sci_dependencies, attr_mgr_l1b, cycle_count=cycle_count, delay_max=delay_max
2968+
)
2969+
2970+
# Assert
2971+
l1b_bgrates_ds, l1b_goodtimes_ds = result
2972+
2973+
# Should have created at least 2 intervals:
2974+
# 1. The interval that was closed before the gap
2975+
# 2. The interval after the gap
2976+
assert len(l1b_goodtimes_ds["gt_start_met"]) >= 2
2977+
2978+
# The first interval should end before the gap chunk
2979+
# (it should be closed when we detect the large interval)
2980+
first_interval_end = l1b_goodtimes_ds["gt_end_met"].values[0]
2981+
gap_chunk_start = met_times_gap_chunk_adjusted[0]
2982+
2983+
# The first interval should end before the gap chunk starts
2984+
# (with the +320 offset applied in the code)
2985+
assert first_interval_end < gap_chunk_start + 320
2986+
2987+
# Verify background rates are valid
2988+
assert np.all(l1b_bgrates_ds["h_background_rates"].values > 0)
2989+
assert np.all(l1b_bgrates_ds["o_background_rates"].values > 0)

0 commit comments

Comments
 (0)