|
| 1 | +# TASK-001: Chat Streaming Fixes |
| 2 | + |
| 3 | +**Status**: IN PROGRESS |
| 4 | +**Priority**: CRITICAL |
| 5 | +**Created**: 2026-02-16 |
| 6 | + |
| 7 | +## Problem Statement |
| 8 | + |
| 9 | +1. **Blank screen during reasoning**: Agent thinks but shows nothing until text arrives |
| 10 | +2. **Token counter broken**: The counter already exists in the UI but shows 0 — the backend sends `usage.reasoningTokens = 262`, but the UI never parses it. Note the evidence below: `reasoningTokens` sits directly under `usage` (top level), **not** under `outputTokenDetails`, so the extraction must read it from the top level as well.
| 11 | +3. **Reasoning not displayed**: Backend sends `providerOptions.google.thoughtSignature` but UI doesn't render it |
| 12 | + |
| 13 | +## Root Causes |
| 14 | + |
| 15 | +### 1. Token Counter Not Working |
| 16 | + |
| 17 | +- Backend sends in `step-finish` or `finish` parts |
| 18 | +- UI doesn't extract from these part types |
| 19 | +- File: `chat-context.tsx` - need to parse `step-finish` parts for usage |
| 20 | + |
| 21 | +### 2. Reasoning Not Displayed |
| 22 | + |
| 23 | +- Backend: `providerOptions.google.thoughtSignature` — a base64-encoded **opaque signature**. NOTE(review): per Google's Gemini API docs this is an encrypted token meant to be echoed back to the model, not human-readable text; decoding it with `atob` will likely produce binary garbage, so the visible reasoning must come from reasoning parts / thought summaries instead — verify before rendering it.
| 24 | +- UI checks: `ReasoningUIPart.type === 'reasoning'` |
| 25 | +- Mismatch: Google sends reasoning via providerMetadata, not as ReasoningUIPart |
| 26 | +- File: `chat-context.tsx` lines 262-277 `streamingReasoning` useMemo |
| 27 | + |
| 28 | +### 3. Blank During Reasoning |
| 29 | + |
| 30 | +- `streamingContent` is empty during reasoning-only phase |
| 31 | +- Loading indicator shows "Thinking..." but no reasoning content |
| 32 | +- Need: Show `AgentReasoning` component when `streamingReasoning` has content but `streamingContent` is empty |
| 33 | + |
| 34 | +## Backend Evidence (from logs) |
| 35 | + |
| 36 | +```json |
| 37 | +{ |
| 38 | + "usage": { |
| 39 | + "inputTokens": 12305, |
| 40 | + "outputTokens": 908, |
| 41 | + "totalTokens": 13213, |
| 42 | + "reasoningTokens": 262 |
| 43 | + }, |
| 44 | + "providerOptions": { |
| 45 | + "google": { |
| 46 | + "thoughtSignature": "Es4ICssIAb4+9vsk+5nPbMKwgqEFaoK3BzttVElSqYCQnFewEG2J1TYiIlhkGNADgLFjRosd483aJTto3PkfhjFngu+DAXzMUoTX+pn02lLwVIHgiy+G6FDwwVaEu/3rIuQPYvS29yA+uUMomHBeWCS6UOHAUbMx4Jz/tLneDbyN35JkNA1PIOK7d5uGubBkZ3cRtLJtv1bYGUJvuBYcrylDCAp129IFdl3I0jR+WpNF+3Khk3tEFujyf4xYoC+2otS61gmyV3gDv23Fa9bpt3eRN6c/cWtN0Y5f928igx+wWVnFRwO+dbtLmovbsyLRoVr2C+4O8a4rKsZN00PaLy+ibuB4/st9pXGAoC70EFsHPcb1KhLg8igmHpXjGafhwKQ9dSBbfx09F0Yh5mP1FiKbL/AjpLuYB9wgSQZRr9FZI/FHgUme1CigXA+jx6AqoYzKAn7xaDnljR29n179UxP6YWQamdPwZZY37FO1wwlfU7hxgxvFQP/JP1u2ERl5M5ExIeOkIFIivRReYa/kNKnre4Kofy/0l9TplSGgpjKx6pHbvysYCxAWqFzriM2LoffCGMSdxxDbIdQE1cb15NRxau+MdWUOslzVmp0Uzs47r8p+rSxYYJxIB3TNiWc2sgguWwM9HwbTLFcGnt/WU7OWF5sl/woeu6J9QdUw4VbwlYNSutsK/fW8n52lI6VDC91DM+JyizBNGTgyYUClJZz/FqkVZiwpbrT0KE2Y5bXNpim2wLWT3we6Bjkf8DcEmVOCiFfou9APDgqqlHV791X7f3UtzzsIAqtUzzJS1rVMBYkTzwdm2aCgOpakzKinMAimi7pXUWweZelcAeSV0mkc/sNfLO5nAW9rZOwZFgMtXXSBi9OrWt+oBWgmo1lLA6EJpyxM0rYXwMP+LdaIvvBlOlUu3kXEU6MTzCPMDnfSzTBmWlwXEig3yOib7CkOBydVsbzFSYtYbNOwTucUZLnXaP01RcHnUrW6Rqn707tsULiD2nkxIX0jvZlSneOq7ia/MQnbsekigJDWyUeIzlUivjqsJ8T9G9a0VI0gRtwHp0/jyxUhZHb6RSfrJLQgRir+qpjnXcgEeTvtBOuIB8ONtUYut214sH05myFny0xpQwZLaiz4uDsdEBVh40oUNH72ZG4an00KgGZv772IJHOeF1buH55gKUnI+ukbkaMKe6Vc8t2+mSHQ4nGED9KV84eGYYMztjI2LtKOWLdifD/l5VYrO1GQwRRnkeAq1ns774nkYFvxMqxM/+NgCW6XOVew63f4B6XJlaIygdV5rMUVP4ZLVACqHYoRIpcWZVNNV2nv00ouCrzLABRFIIUwvIWyr5VTtNdXTGqny5e/1klcUpY0zZ8vP/QpSdzAeykdZw69wi0G0uBL5NjBnmAVSyLlO2TGoCOuIJa1MBrk8XZ37McNpzeDXYiYK+KsdfeRBQCLDkLnehFMX4QT+tIxg==" |
| 47 | + } |
| 48 | + } |
| 49 | +} |
| 50 | +``` |
| 51 | + |
| 52 | +## Key Files to Modify |
| 53 | + |
| 54 | +| File | Lines | What to Fix | |
| 55 | +| ------------------- | --------- | ------------------------------------------------------ | |
| 56 | +| `chat-context.tsx` | 262-277 | `streamingReasoning` - add thoughtSignature extraction | |
| 57 | +| `chat-context.tsx`  | TBD (new) | Add usage extraction from `step-finish` parts — no existing code; add a new `useMemo` |
| 58 | +| `chat-messages.tsx` | 1697-1735 | Show `AgentReasoning` during reasoning-only streaming | |
| 59 | + |
| 60 | +## AI SDK v6 Types Needed |
| 61 | + |
| 62 | +```typescript |
| 63 | +import { |
| 64 | + // Type guards |
| 65 | + isReasoningUIPart, |
| 66 | + isTextUIPart, |
| 67 | + isStepStartUIPart, |
| 68 | + isDataUIPart, |
| 69 | + |
| 70 | + // Part types |
| 71 | + ReasoningUIPart, |
| 72 | + TextUIPart, |
| 73 | + StepStartUIPart, |
| 74 | + |
| 75 | + // Usage types |
| 76 | + FinishReason, |
| 77 | + StepResult, |
| 78 | +} from 'ai' |
| 79 | +``` |
| 80 | + |
| 81 | +## Implementation Plan |
| 82 | + |
| 83 | +### Step 1: Fix streamingReasoning Extraction |
| 84 | + |
| 85 | +```typescript |
| 86 | +const streamingReasoning = useMemo(() => { |
| 87 | + const lastMessage = messages[messages.length - 1] |
| 88 | + if (lastMessage?.role === 'assistant') { |
| 89 | + // 1. Check for ReasoningUIPart |
| 90 | + const reasoningPart = lastMessage.parts?.find( |
| 91 | + (p): p is ReasoningUIPart => p.type === 'reasoning' |
| 92 | + ) |
| 93 | + if (reasoningPart?.text) return reasoningPart.text |
| 94 | + |
| 95 | + // 2. Check providerMetadata.google.thoughtSignature |
| 96 | + for (const part of lastMessage.parts ?? []) { |
| 97 | + const pm = (part as any).providerMetadata |
| 98 | + if (pm?.google?.thoughtSignature) { |
| 99 | + // thoughtSignature is base64 encoded |
| 100 | + try { |
| 101 | + return atob(pm.google.thoughtSignature) |
| 102 | + } catch { |
| 103 | + return pm.google.thoughtSignature |
| 104 | + } |
| 105 | + } |
| 106 | + } |
| 107 | + |
| 108 | + // 3. Fallback to extractThoughtSummaryFromParts |
| 109 | + return extractThoughtSummaryFromParts(lastMessage.parts) |
| 110 | + } |
| 111 | + return '' |
| 112 | +}, [messages]) |
| 113 | +``` |
| 114 | + |
| 115 | +### Step 2: Fix Usage Extraction from step-finish |
| 116 | + |
| 117 | +```typescript |
| 118 | +const usage: TokenUsage | null = useMemo(() => { |
| 119 | + for (const message of messages) { |
| 120 | + if (message.role === 'assistant') { |
| 121 | + for (const part of message.parts ?? []) { |
| 122 | + if (part.type === 'step-finish' || part.type === 'finish') { |
| 123 | + const usageData = (part as any).usage |
| 124 | + if (usageData) { |
| 125 | + return { |
| 126 | + inputTokens: |
| 127 | + usageData.promptTokens ?? |
| 128 | + usageData.inputTokens ?? |
| 129 | + 0, |
| 130 | + outputTokens: |
| 131 | + usageData.completionTokens ?? |
| 132 | + usageData.outputTokens ?? |
| 133 | + 0, |
| 134 | + totalTokens: usageData.totalTokens ?? 0, |
| 135 | + inputTokenDetails: { |
| 136 | + cacheReadTokens: |
| 137 | + usageData.inputTokenDetails?.cacheRead ?? 0, |
| 138 | + cacheWriteTokens: |
| 139 | + usageData.inputTokenDetails?.cacheWrite ?? |
| 140 | + 0, |
| 141 | + noCacheTokens: |
| 142 | + usageData.inputTokenDetails?.noCache ?? 0, |
| 143 | + }, |
| 144 | + outputTokenDetails: { |
| 145 | + textTokens: |
| 146 | + usageData.outputTokenDetails?.text ?? 0, |
| 147 | + reasoningTokens: |
| 148 | + usageData.outputTokenDetails?.reasoning ?? |
| 149 | + 0, |
| 150 | + }, |
| 151 | + } |
| 152 | + } |
| 153 | + } |
| 154 | + } |
| 155 | + } |
| 156 | + } |
| 157 | + return null |
| 158 | +}, [messages]) |
| 159 | +``` |
| 160 | + |
| 161 | +### Step 3: Fix Blank During Reasoning |
| 162 | + |
| 163 | +In chat-messages.tsx, add condition to show AgentReasoning when: |
| 164 | + |
| 165 | +- `status === 'streaming'` |
| 166 | +- `streamingReasoning` has content |
| 167 | +- `streamingContent` is empty |
| 168 | + |
| 169 | +## Notes |
| 170 | + |
| 171 | +- Don't add a token display — it already EXISTS; just FIX the extraction logic
| 172 | +- Don't modify `selectAgent` — it just needs to match the `agentId` string
| 173 | +- Focus on extraction logic in chat-context.tsx |