Skip to content

Commit 9adcabb

Browse files
committed
FINALIZED gate: prevent segment activation before boundaries known
Replace the old profile-valid + queue-depth gates with a single FINALIZED gate that checks optimization_state >= TC_PLAN_FINALIZED before allowing RT to activate any segment.

- Forward pass only stamps FINALIZED when exit boundary conditions are known: EXACT segments (vf=0 always correct), segments with a successor in queue, or tail segments when the queue is sealed.
- SKIP path re-stamps FINALIZED after the backward pass knocks the state back to SMOOTHED.
- queue_sealed flag in TP_STRUCT: set by tpFlushCompressor_9D at sync points (dwell, mode change, program end), cleared by tpAddLine_9D/tpAddCircle_9D when new motion arrives. Lets the optimizer finalize the tail segment immediately instead of waiting for a successor that will never come.
- 200ms safety-net timeout for cases not covered by the seal (first segment after tool change, program start).
- Cleanup: removed debug probes (GATE_DBG, ACTIVATE_DBG, QUEUE_DBG, OPT_DBG, SEAL_DBG, XING_DBG, FWD_VF_DBG), the stale active-segment rewrite, and the pessimistic first-profile hack.
1 parent 787b80c commit 9adcabb

4 files changed

Lines changed: 92 additions & 70 deletions

File tree

src/emc/motion_planning/motion_planning_9d.cc

Lines changed: 31 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -115,7 +115,7 @@ static TC_STRUCT * tcqItem_user(TC_QUEUE_STRUCT * const tcq, int n)
115115
// Replan flag: set whenever the queue changes (segment added, feed change,
116116
// merge) so that replanForward() runs on the next tick. Cleared when a
117117
// full forward pass completes without budget expiry.
118-
static bool g_needs_replan = false;
118+
bool g_needs_replan = false;
119119

120120
/**
121121
* @brief Userspace version of tcqPut for planner_type 2
@@ -2352,6 +2352,18 @@ static int replanForward(TP_STRUCT *tp, double v0_override, double budget_sec)
23522352
if (entry_ok && feed_ok && exit_ok) {
23532353
// Profile is clean — propagate its actual exit and continue
23542354
fo_skip_count++;
2355+
// Re-stamp FINALIZED if backward pass knocked it back to SMOOTHED
2356+
bool boundaries_known_skip = (tc->term_cond == TC_TERM_COND_EXACT)
2357+
|| (i + 1 < queue_len)
2358+
|| (tp->queue_sealed && i == queue_len - 1);
2359+
if (boundaries_known_skip) {
2360+
int cur_state = __atomic_load_n(
2361+
(int*)&tc->shared_9d.optimization_state, __ATOMIC_ACQUIRE);
2362+
if (cur_state < TC_PLAN_FINALIZED) {
2363+
atomicStoreInt((int*)&tc->shared_9d.optimization_state,
2364+
TC_PLAN_FINALIZED);
2365+
}
2366+
}
23552367
if (tc->term_cond == TC_TERM_COND_TANGENT) {
23562368
double pu = profileExitVelUnscaled(&tc->shared_9d.profile);
23572369
prev_exit_vel = pu * tc->shared_9d.profile.computed_feed_scale;
@@ -2377,19 +2389,6 @@ static int replanForward(TP_STRUCT *tp, double v0_override, double budget_sec)
23772389
continue;
23782390
}
23792391

2380-
// --- Fix 4: pessimistic first profile ---
2381-
// On first profile (no prior valid profile), force exit=0 so RT always
2382-
// gets a safe profile. Raises on subsequent calls once the backward pass
2383-
// has converged. Exception: bezier blends have deterministic exits.
2384-
bool is_first_profile = !tc->shared_9d.profile.valid;
2385-
if (is_first_profile && scaled_v_exit > 0.0) {
2386-
TC_STRUCT *next_seg = (i + 1 < queue_len) ? tcqItem_user(queue, i + 1) : NULL;
2387-
bool next_is_blend = (next_seg && next_seg->motion_type == TC_BEZIER);
2388-
if (tc->motion_type != TC_BEZIER && !next_is_blend) {
2389-
scaled_v_exit = 0.0;
2390-
desired_fvel = 0.0;
2391-
}
2392-
}
23932392

23942393
// --- Feed hold: write zero-velocity stopped profile ---
23952394
// When feed is effectively 0 (feed hold), skip Ruckig (can't traverse
@@ -2515,7 +2514,18 @@ static int replanForward(TP_STRUCT *tp, double v0_override, double budget_sec)
25152514
fo_dirty_count++;
25162515
tc->shared_9d.profile.dbg_src = 2; // forward pass
25172516
tc->shared_9d.profile.dbg_v0_req = scaled_v_entry;
2518-
atomicStoreInt((int*)&tc->shared_9d.optimization_state, TC_PLAN_FINALIZED);
2517+
// Only finalize when exit boundary conditions are known.
2518+
// STOP segments may be promoted to TANGENT when the next
2519+
// segment creates a blend, so require a successor in queue.
2520+
// EXACT (last segment of program) always has correct vf=0.
2521+
// queue_sealed: interpreter at sync point, no successor coming.
2522+
bool has_successor = (i + 1 < queue_len);
2523+
bool sealed_tail = (tp->queue_sealed && i == queue_len - 1);
2524+
bool boundaries_known = (tc->term_cond == TC_TERM_COND_EXACT)
2525+
|| has_successor || sealed_tail;
2526+
if (boundaries_known) {
2527+
atomicStoreInt((int*)&tc->shared_9d.optimization_state, TC_PLAN_FINALIZED);
2528+
}
25192529
if (tc->term_cond == TC_TERM_COND_TANGENT) {
25202530
double pu = profileExitVelUnscaled(&tc->shared_9d.profile);
25212531
prev_exit_vel = pu * tc->shared_9d.profile.computed_feed_scale;
@@ -4053,8 +4063,11 @@ extern "C" int tpOptimizePlannedMotions_9D(TP_STRUCT * const tp, int /*optimizat
40534063
if (g_feed_mgr.committed_feed < 0.001 && g_feed_mgr.planning.isOpen())
40544064
return 0;
40554065

4056-
// Run unlimited replan — processes all segments needing recomputation.
4057-
(void)replanForward(tp, -1.0, 0.0 /* unlimited */);
4066+
// Budget-limited replan: don't block the interpreter for the entire queue.
4067+
// The servo cycle (checkFeedOverride → g_needs_replan) picks up remaining
4068+
// segments incrementally. 2ms is enough to process the tail segments
4069+
// (new segment + neighbors) while clean segments SKIP in ~50ns each.
4070+
(void)replanForward(tp, -1.0, 0.002);
40584071

40594072
return 0;
40604073
}
@@ -4101,6 +4114,7 @@ extern "C" int tpClearPlanning_9D(TP_STRUCT * const tp)
41014114
// causing non-deterministic behavior between identical runs.
41024115
g_feed_mgr.reset();
41034116
g_needs_replan = false;
4117+
tp->queue_sealed = 0;
41044118

41054119
return 0;
41064120
}

src/emc/motion_planning/motion_planning_9d_userspace.cc

Lines changed: 19 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -44,6 +44,9 @@ extern "C" {
4444
extern emcmot_config_t *emcmotConfig;
4545
}
4646

47+
// Replan flag (defined in motion_planning_9d.cc)
48+
extern bool g_needs_replan;
49+
4750
// ── Segment Compressor ──────────────────────────────────────────────────────
4851
// Buffers consecutive near-collinear G1 segments and emits a single compressed
4952
// line within G64 P tolerance. Eliminates artificial kink velocities from
@@ -236,7 +239,16 @@ static int compressorFlush(TP_STRUCT *tp)
236239
*/
237240
extern "C" int tpFlushCompressor_9D(TP_STRUCT *tp)
238241
{
239-
return compressorFlush(tp);
242+
int result = compressorFlush(tp);
243+
244+
// Seal the queue: interpreter is at a sync point (tool change, dwell,
245+
// mode change, program end). No more motion segments are coming until
246+
// the next tpAddLine_9D/tpAddCircle_9D call clears this flag.
247+
// This lets the optimizer finalize the tail segment immediately.
248+
tp->queue_sealed = 1;
249+
g_needs_replan = true;
250+
251+
return result;
240252
}
241253

242254
/**
@@ -1337,6 +1349,9 @@ extern "C" int tpAddLine_9D(
13371349
return -1;
13381350
}
13391351

1352+
// New motion arriving — unseal the queue.
1353+
tp->queue_sealed = 0;
1354+
13401355
// First segment of a new program: reset userspace planning state.
13411356
// tpClear() runs in RT context where tpClearPlanning_9D is unavailable,
13421357
// so we detect program-start here by an empty queue.
@@ -1585,6 +1600,9 @@ extern "C" int tpAddCircle_9D(
15851600
return -1;
15861601
}
15871602

1603+
// New motion arriving — unseal the queue.
1604+
tp->queue_sealed = 0;
1605+
15881606
// Flush any buffered compressed segments before adding a circle
15891607
if (g_compressor.active) {
15901608
compressorFlush(tp);

src/emc/tp/tp.c

Lines changed: 34 additions & 52 deletions
Original file line number | Diff line number | Diff line change
@@ -563,6 +563,7 @@ int tpClear(TP_STRUCT * const tp)
563563
tp->aborting = 0;
564564
tp->pausing = 0;
565565
tp->abort_profiles_written = 0;
566+
tp->queue_sealed = 0;
566567
tp->reverse_run = 0;
567568
tp->synchronized = 0;
568569
tp->uu_per_rev = 0.0;
@@ -4868,7 +4869,6 @@ STATIC int tpHandleSplitCycle(TP_STRUCT * const tp, TC_STRUCT * const tc,
48684869
tc->currentjerk = cj;
48694870
}
48704871

4871-
48724872
} else {
48734873
tc->progress = tcGetTarget(tc, tp->reverse_run);
48744874
}
@@ -5252,59 +5252,41 @@ int tpRunCycle(TP_STRUCT * const tp, long period)
52525252
* Solution: Wait for Ruckig profile validity AND queue depth before starting.
52535253
*/
52545254
if (GET_TRAJ_PLANNER_TYPE() == 2) {
5255-
int queue_len = tcqLen(&tp->queue);
5256-
5257-
/* Wait for Ruckig profile to be computed before starting ANY segment.
5258-
* This prevents trapezoidal→Ruckig switch mid-execution which causes
5259-
* position discontinuity in cycles 2-8 after motion start. */
5260-
if (!__atomic_load_n(&tc->shared_9d.profile.valid, __ATOMIC_ACQUIRE) && tc->progress < TP_POS_EPSILON) {
5261-
/* Abort bypass: don't block abort behind profile wait */
5262-
if (tp->aborting) {
5263-
goto past_gates;
5264-
}
5265-
static int profile_wait_count = 0;
5266-
profile_wait_count++;
5267-
5268-
/* Wait up to 200 cycles (200ms at 1kHz) for profile */
5269-
if (profile_wait_count < 200) {
5270-
return TP_ERR_WAITING;
5271-
}
5272-
/* Timeout - userspace too slow, will use trapezoidal for entire segment */
5273-
rtapi_print_msg(RTAPI_MSG_WARN,
5274-
"Ruckig profile timeout seg=%d, using trapezoidal fallback\n", tc->id);
5275-
profile_wait_count = 0;
5276-
}
5277-
5278-
/* Queue depth gate: wait for at least 2 segments before activating.
5279-
* When a segment is alone in the queue (queue_len < 2), its profile
5280-
* may have been computed with v_exit=0 (no successor/blend yet).
5281-
* The gate holds RT until the interpreter adds the next segment,
5282-
* giving the optimizer a chance to recompute with the correct
5283-
* exit velocity. The segment is NOT active during the gate,
5284-
* so the optimizer's active-segment skip doesn't prevent correction.
5255+
/* Boundary-condition gate: wait for the optimizer to finalize the
5256+
* profile before activating any segment. TC_PLAN_FINALIZED is only
5257+
* set when the forward pass has computed a profile with known exit
5258+
* boundary conditions — either vf=0 for EXACT/STOP segments, or
5259+
* with a valid successor present for TANGENT segments.
52855260
*
5286-
* Skip the gate for EXACT segments (last segment of program) —
5287-
* their v_exit=0 is always correct, no correction needed. */
5288-
if (tc->progress < TP_POS_EPSILON && queue_len < 2 && nexttc == NULL
5289-
&& tc->term_cond != TC_TERM_COND_EXACT) {
5290-
/* Abort bypass: don't block abort behind queue gate */
5291-
if (tp->aborting) {
5292-
goto past_gates;
5293-
}
5294-
static int gate_seg_id = -1;
5295-
static int gate_wait_count = 0;
5296-
/* Reset counter for each new segment */
5297-
if (tc->id != gate_seg_id) {
5298-
gate_seg_id = tc->id;
5299-
gate_wait_count = 0;
5300-
}
5301-
gate_wait_count++;
5302-
5303-
/* Wait up to 20 cycles (20ms at 1kHz) for a successor */
5304-
if (gate_wait_count < 20) {
5305-
return TP_ERR_WAITING;
5261+
* This single gate replaces both the old profile-valid gate and the
5262+
* queue-depth gate. It ensures RT never executes a profile that was
5263+
* computed before the planner knew what comes next. */
5264+
if (tc->progress < TP_POS_EPSILON && !tc->active) {
5265+
static int finalized_seg_id = -1;
5266+
static int finalized_wait_count = 0;
5267+
int opt_state = __atomic_load_n(
5268+
(int*)&tc->shared_9d.optimization_state, __ATOMIC_SEQ_CST);
5269+
if (opt_state < TC_PLAN_FINALIZED) {
5270+
if (tp->aborting) {
5271+
goto past_gates;
5272+
}
5273+
if (tc->id != finalized_seg_id) {
5274+
finalized_seg_id = tc->id;
5275+
finalized_wait_count = 0;
5276+
}
5277+
finalized_wait_count++;
5278+
5279+
/* Safety-net timeout: should never fire in normal operation.
5280+
* If it does, something prevented the optimizer from
5281+
* finalizing this segment (successor never arrived, etc.) */
5282+
int timeout = 200;
5283+
if (finalized_wait_count < timeout) {
5284+
return TP_ERR_WAITING;
5285+
}
5286+
finalized_wait_count = 0;
5287+
} else {
5288+
finalized_wait_count = 0;
53065289
}
5307-
gate_wait_count = 0;
53085290
}
53095291
past_gates:
53105292
}

src/emc/tp/tp_types.h

Lines changed: 8 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -151,6 +151,14 @@ typedef struct TP_STRUCT {
151151
// Actions queued via tpSetSegmentAction() before the next motion segment
152152
segment_actions_t pending_actions;
153153

154+
// Queue sealed flag (planner_type 2).
155+
// Set by tpFlushCompressor_9D() when the interpreter reaches a sync point
156+
// (tool change, dwell, mode change, program end) — signals "no more motion
157+
// segments coming." Cleared by tpAddLine_9D / tpAddCircle_9D when new
158+
// motion arrives. Allows the optimizer to finalize the tail segment
159+
// immediately instead of waiting for a successor that will never come.
160+
int queue_sealed;
161+
154162
} TP_STRUCT;
155163

156164

0 commit comments

Comments
 (0)