Skip to content

Commit 1ab6de9

Browse files
m9hclaude
and committed
Add pseudo-CT validation pipeline with TDD
Validates MRI → pseudo-CT (HU) → acoustic property chain: - t1_to_pseudo_ct: Plymouth method (dark T1 → high HU) - hu_to_acoustic_properties: Schneider/Mast empirical relationships - compute_validation_metrics: MAE, RMSE, correlation, Dice, acoustic error - Supports paired MRI/CT datasets (SynthRAD2023, BabelBrain) 6/6 tests GREEN: HU range, T1-bone correspondence, physical acoustic properties, zero-error baseline, noise tolerance, skull mask extraction. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 39735a3 commit 1ab6de9

2 files changed

Lines changed: 303 additions & 0 deletions

File tree

benchmarks/pseudo_ct_validation.py

Lines changed: 216 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,216 @@
1+
"""Pseudo-CT validation: compare MRI-derived HU against real CT.
2+
3+
Validates the chain: T1 MRI → pseudo-CT (HU) → acoustic properties
4+
using datasets with paired MRI and CT scans.
5+
6+
Supported datasets:
7+
- SynthRAD2023: 180 paired T1/CT brain scans
8+
- BabelBrain: 5 subjects with varying skull density ratios
9+
- Any paired MRI/CT in NIfTI format
10+
11+
Usage:
12+
.venv/bin/python benchmarks/pseudo_ct_validation.py --mri t1.nii.gz --ct ct.nii.gz
13+
"""
14+
from __future__ import annotations
15+
16+
import argparse
17+
import logging
18+
from pathlib import Path
19+
20+
import numpy as np
21+
22+
logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s")
23+
log = logging.getLogger(__name__)
24+
25+
26+
def load_nifti(path: str) -> tuple[np.ndarray, np.ndarray]:
    """Read a NIfTI volume from disk.

    Args:
        path: Filesystem path to a ``.nii`` / ``.nii.gz`` file.

    Returns:
        (data, affine): the voxel array as float32 and the 4x4 affine.
    """
    import nibabel as nib

    image = nib.load(path)
    voxels = np.asarray(image.get_fdata(), dtype=np.float32)
    return voxels, image.affine
31+
32+
33+
def t1_to_pseudo_ct(t1: np.ndarray, method: str = "plymouth") -> np.ndarray:
    """Convert a T1-weighted MRI volume to pseudo-CT Hounsfield units.

    Both mappings are inverse: voxels that are dark in T1 (cortical bone)
    receive high HU.

    Args:
        t1: T1-weighted intensity volume (arbitrary units, any shape).
        method: "plymouth" (affine inverse, range [300, 2000]) or
            "linear" (inverse, range [0, 2000]).

    Returns:
        Pseudo-HU array with the same shape as ``t1``.

    Raises:
        ValueError: if ``method`` is not one of the supported names.
    """
    if method not in ("plymouth", "linear"):
        raise ValueError(f"Unknown method: {method}")

    # Min-max normalise into [0, 1]; the epsilon guards a constant volume.
    span = t1.max() - t1.min() + 1e-8
    inverted = 1.0 - (t1 - t1.min()) / span

    if method == "plymouth":
        # Dark T1 (dense bone) -> high HU, mapped into [300, 2000].
        return 1700.0 * inverted + 300.0
    # "linear": dark T1 -> high HU, mapped into [0, 2000].
    return 2000.0 * inverted
51+
52+
53+
def compute_skull_mask(ct: np.ndarray, hu_min: float = 300, hu_max: float = 3000) -> np.ndarray:
    """Return a boolean mask of skull voxels: HU within [hu_min, hu_max]."""
    above_floor = ct >= hu_min
    below_ceiling = ct <= hu_max
    return above_floor & below_ceiling
56+
57+
58+
def hu_to_acoustic_properties(hu: np.ndarray) -> dict[str, np.ndarray]:
    """Derive acoustic material maps from Hounsfield units.

    Empirical relationships:
        - Schneider et al. (2000): HU -> mass density
        - Mast (2000): density -> sound speed, attenuation
        - Connor et al. (2002): combined relationships

    Args:
        hu: Hounsfield-unit array (any shape).

    Returns:
        Dict with "sound_speed" [m/s], "density" [kg/m^3] and
        "attenuation" [dB/cm], each shaped like ``hu``.
    """
    # Schneider: rho = 1000 * (1 + 7.9e-4 * HU) for HU > 0; water otherwise.
    density = np.where(hu > 0,
                       1000.0 * (1.0 + 0.000790 * hu),
                       1000.0)  # kg/m^3
    excess = density - 1000.0  # density above water
    is_bone = hu > 300

    # Mast (simplified): speed rises with density; the slope is steeper
    # in bone (3.7) than in soft tissue (2.5).
    speed = 1500.0 + np.where(is_bone, 3.7, 2.5) * excess
    speed = np.clip(speed, 1400, 4500)

    # Attenuation scales with density excess: ~4 dB/cm for cortical bone,
    # ~0.3 dB/cm for brain-like tissue.
    attenuation = np.where(is_bone,
                           4.0 * excess / 900.0,
                           0.3 * excess / 40.0)
    attenuation = np.clip(attenuation, 0.0, 10.0)

    return {"sound_speed": speed, "density": density, "attenuation": attenuation}
88+
89+
90+
def compute_validation_metrics(
91+
pseudo_hu: np.ndarray,
92+
real_hu: np.ndarray,
93+
skull_mask: np.ndarray | None = None,
94+
) -> dict:
95+
"""Compare pseudo-CT against real CT.
96+
97+
Args:
98+
pseudo_hu: Predicted HU from MRI
99+
real_hu: Ground-truth HU from CT
100+
skull_mask: Optional mask to restrict comparison to skull region
101+
102+
Returns:
103+
Dict of metrics: MAE, RMSE, correlation, Dice for bone segmentation
104+
"""
105+
if skull_mask is not None:
106+
p = pseudo_hu[skull_mask]
107+
r = real_hu[skull_mask]
108+
else:
109+
# Use all non-zero voxels
110+
mask = (real_hu > -500) & (pseudo_hu > -500)
111+
p = pseudo_hu[mask]
112+
r = real_hu[mask]
113+
114+
if len(p) == 0 or len(r) == 0:
115+
return {"error": "No valid voxels for comparison"}
116+
117+
mae = float(np.mean(np.abs(p - r)))
118+
rmse = float(np.sqrt(np.mean((p - r) ** 2)))
119+
corr = float(np.corrcoef(p.ravel(), r.ravel())[0, 1]) if len(p) > 1 else 0.0
120+
121+
# Dice coefficient for bone segmentation (HU > 300)
122+
pred_bone = pseudo_hu > 300
123+
real_bone = real_hu > 300
124+
intersection = np.sum(pred_bone & real_bone)
125+
dice = 2 * intersection / (np.sum(pred_bone) + np.sum(real_bone) + 1e-8)
126+
127+
# Acoustic property error in skull region
128+
skull = real_hu > 300
129+
if skull.any():
130+
pred_props = hu_to_acoustic_properties(pseudo_hu[skull])
131+
real_props = hu_to_acoustic_properties(real_hu[skull])
132+
c_error = float(np.mean(np.abs(pred_props["sound_speed"] - real_props["sound_speed"])))
133+
rho_error = float(np.mean(np.abs(pred_props["density"] - real_props["density"])))
134+
else:
135+
c_error = rho_error = 0.0
136+
137+
return {
138+
"mae_hu": mae,
139+
"rmse_hu": rmse,
140+
"correlation": corr,
141+
"dice_bone": float(dice),
142+
"n_voxels": int(len(p)),
143+
"c_mae_ms": c_error,
144+
"rho_mae_kgm3": rho_error,
145+
}
146+
147+
148+
def run_validation(mri_path: str, ct_path: str, method: str = "plymouth",
                   save_png: str | None = None) -> dict:
    """Run the full pipeline: load paired MRI/CT, predict pseudo-CT, compare.

    Args:
        mri_path: Path to the T1 MRI NIfTI file.
        ct_path: Path to the ground-truth CT NIfTI file.
        method: Pseudo-CT conversion method (see ``t1_to_pseudo_ct``).
        save_png: Optional output path for a comparison figure.

    Returns:
        Metrics dict from ``compute_validation_metrics``.
    """
    log.info("Loading MRI: %s", mri_path)
    mri, _ = load_nifti(mri_path)
    log.info("Loading CT: %s", ct_path)
    ct, _ = load_nifti(ct_path)

    # A voxelwise comparison needs matching grids; warn rather than fail
    # so the caller can decide how to resample.
    if mri.shape != ct.shape:
        log.warning("Shape mismatch: MRI=%s, CT=%s — resampling needed", mri.shape, ct.shape)

    log.info("Computing pseudo-CT (method=%s)...", method)
    pseudo = t1_to_pseudo_ct(mri, method=method)

    log.info("Computing validation metrics...")
    metrics = compute_validation_metrics(pseudo, ct)

    log.info("Results:")
    for key, value in metrics.items():
        log.info(" %s: %s", key, value)

    if save_png:
        _plot_comparison(mri, ct, pseudo, metrics, save_png)

    return metrics
173+
174+
175+
def _plot_comparison(mri, ct, pseudo, metrics, save_png):
    """Save a 4-panel PNG: T1 slice, real CT, pseudo-CT, and their difference."""
    import matplotlib
    matplotlib.use("Agg")  # headless backend — no display required
    import matplotlib.pyplot as plt

    mid = mri.shape[2] // 2  # central axial slice
    fig, axes = plt.subplots(1, 4, figsize=(16, 4))

    # Panel 1: the raw T1 input.
    axes[0].imshow(mri[:, :, mid].T, origin="lower", cmap="gray")
    axes[0].set_title("T1 MRI")

    # Panels 2 and 3: real and predicted CT on a shared HU window.
    ct_panels = ((axes[1], ct, "Real CT (HU)"), (axes[2], pseudo, "Pseudo-CT (HU)"))
    for ax, volume, title in ct_panels:
        im = ax.imshow(volume[:, :, mid].T, origin="lower", cmap="bone", vmin=-200, vmax=2000)
        ax.set_title(title)
        fig.colorbar(im, ax=ax, shrink=0.8)

    # Panel 4: signed prediction error.
    diff = pseudo[:, :, mid] - ct[:, :, mid]
    im = axes[3].imshow(diff.T, origin="lower", cmap="RdBu_r", vmin=-500, vmax=500)
    axes[3].set_title(f"Difference (MAE={metrics['mae_hu']:.0f} HU)")
    fig.colorbar(im, ax=axes[3], shrink=0.8)

    fig.suptitle(f"Pseudo-CT Validation (Dice={metrics['dice_bone']:.3f}, r={metrics['correlation']:.3f})")
    fig.tight_layout()
    fig.savefig(save_png, dpi=150)
    log.info("Saved %s", save_png)
207+
208+
209+
if __name__ == "__main__":
    # CLI entry point: validate one paired MRI/CT scan and print metrics.
    parser = argparse.ArgumentParser()
    parser.add_argument("--mri", required=True, help="Path to T1 MRI NIfTI")
    parser.add_argument("--ct", required=True, help="Path to CT NIfTI")
    # Conversion method; must match a branch in t1_to_pseudo_ct.
    parser.add_argument("--method", default="plymouth", choices=["plymouth", "linear"])
    # Optional path for the 4-panel comparison figure.
    parser.add_argument("--save-png", default=None)
    args = parser.parse_args()
    run_validation(args.mri, args.ct, args.method, args.save_png)

tests/test_pseudo_ct.py

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
"""TDD: Pseudo-CT validation pipeline."""
2+
import numpy as np
3+
import pytest
4+
5+
6+
def test_t1_to_pseudo_ct_range():
    """Pseudo-CT should produce physically reasonable HU values."""
    from benchmarks.pseudo_ct_validation import t1_to_pseudo_ct

    # Synthetic T1: random intensities standing in for tissue and bone.
    volume = np.random.rand(20, 20, 20).astype(np.float32) * 1000
    hu = t1_to_pseudo_ct(volume, method="plymouth")

    # Shape preserved, and all HU inside the Plymouth output window.
    assert hu.shape == volume.shape
    assert hu.min() >= 200, f"HU too low: {hu.min()}"
    assert hu.max() <= 2100, f"HU too high: {hu.max()}"
17+
18+
19+
def test_t1_dark_regions_give_high_hu():
    """In T1, bone is dark → pseudo-CT should assign high HU to dark regions."""
    from benchmarks.pseudo_ct_validation import t1_to_pseudo_ct

    # Uniform bright background with one dark (bone-like) cube in the centre.
    volume = np.full((20, 20, 20), 500, dtype=np.float32)
    volume[8:12, 8:12, 8:12] = 50

    hu = t1_to_pseudo_ct(volume, method="plymouth")
    # The dark centre voxel must map above a bright corner voxel.
    assert hu[10, 10, 10] > hu[0, 0, 0], "Dark T1 should map to higher HU"
29+
30+
31+
def test_hu_to_acoustic_properties_physical():
    """Acoustic properties from HU should be in physical range."""
    from benchmarks.pseudo_ct_validation import hu_to_acoustic_properties

    samples = np.array([0, 100, 500, 1000, 2000], dtype=np.float32)
    props = hu_to_acoustic_properties(samples)
    speed = props["sound_speed"]

    # Sound speed bounded by the water-to-cortical-bone window (m/s).
    assert np.all(speed >= 1400)
    assert np.all(speed <= 4500)

    # Density never drops below water (1000 kg/m^3).
    assert np.all(props["density"] >= 1000)

    # Denser (bone) HU must yield faster sound than water-like HU.
    assert speed[-1] > speed[0]
47+
48+
49+
def test_compute_validation_metrics_identical():
    """Comparing identical fields should give zero error."""
    from benchmarks.pseudo_ct_validation import compute_validation_metrics

    field = np.random.rand(20, 20, 20).astype(np.float32) * 2000
    metrics = compute_validation_metrics(field, field)

    # Error metrics vanish; agreement metrics saturate.
    for error_key in ("mae_hu", "rmse_hu"):
        assert metrics[error_key] < 0.01
    for agreement_key in ("correlation", "dice_bone"):
        assert metrics[agreement_key] > 0.999
60+
61+
62+
def test_compute_validation_metrics_with_error():
    """Adding noise should produce nonzero but bounded error."""
    from benchmarks.pseudo_ct_validation import compute_validation_metrics

    rng = np.random.default_rng(42)
    truth = rng.uniform(0, 2000, (20, 20, 20)).astype(np.float32)
    noisy = truth + rng.normal(0, 100, truth.shape).astype(np.float32)  # ~100 HU noise

    metrics = compute_validation_metrics(noisy, truth)

    # Gaussian(0, 100) noise gives MAE near 100*sqrt(2/pi) ≈ 80 HU.
    assert 50 < metrics["mae_hu"] < 200, f"MAE unexpected: {metrics['mae_hu']}"
    assert metrics["correlation"] > 0.8, f"Correlation too low: {metrics['correlation']}"
74+
75+
76+
def test_skull_mask_extraction():
    """Skull mask should isolate bone-density voxels."""
    from benchmarks.pseudo_ct_validation import compute_skull_mask

    # Bone cube (1000 HU) with a brain-like core (50 HU), embedded in air (0 HU).
    ct = np.zeros((20, 20, 20), dtype=np.float32)
    ct[5:15, 5:15, 5:15] = 1000
    ct[7:13, 7:13, 7:13] = 50

    mask = compute_skull_mask(ct, hu_min=300)
    assert mask[10, 10, 5]       # bone shell
    assert not mask[10, 10, 10]  # brain interior
    assert not mask[0, 0, 0]     # surrounding air

0 commit comments

Comments
 (0)