/* global React, Citation, useCitation */ // chat.jsx — ChatPanel: persistent, embeddable, right-rail on desktop. // Mock SSE adapter swappable with `POST /api/chat?ticker=…` (line 14). // Exposes window.ChatPanel, window.RefusalBanner, window.ChatSession (legacy). const { useState, useEffect, useRef } = React; // ---------------------------------------------------------------------------- // Pluggable chat client. // Today: mock SSE generator yielding tool-call + token events. // Swap: replace `mockChat` with a real EventSource / fetch+ReadableStream that // hits `POST /api/chat?ticker=…` and yields the same {type, payload} events. // ---------------------------------------------------------------------------- async function* chatStream({ ticker, messages, signal }) { // Try real backend first; fall back to mock. if (window.CCT_USE_REAL_API) { try { yield* realChat({ ticker, messages, signal }); return; } catch (e) { console.warn("[chat] real API failed, falling back to mock", e); } } yield* mockChat({ ticker, messages, signal }); } // Translates backend SSE events to the frontend ChatPanel event shape. // Backend emits: text / tool_call / tool_result / error / done // Frontend wants: tool / token / para_start / para_end / quote / done async function* realChat({ ticker, messages, signal }) { const base = window.CCT_API_BASE || ""; // Cap at last 10 turns (20 messages). Drop empty-text messages so assistant // turns that weren't serialized get filtered out — they'd confuse the model. 
const trimmed = (messages || []) .filter(m => m && m.role && (m.content || "").trim()) .slice(-20); const res = await fetch(`${base}/api/chat?ticker=${encodeURIComponent(ticker)}`, { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ messages: trimmed }), signal }); if (!res.ok || !res.body) throw new Error(`chat ${res.status}`); const reader = res.body.getReader(); const dec = new TextDecoder(); let buf = ""; // Track pending tool_calls so we can merge with their tool_result. const pendingTools = new Map(); // name → { name, args, ok?, summary? } let inPara = false; // First text event after a tool result implies a fresh paragraph; the // backend doesn't emit explicit paragraph markers, so we map double-newline // boundaries inside `text` chunks to para_end + para_start. // Strip noise citation tags the model emits when section couldn't be // resolved — e.g. "[Q1FY26, unknown]" → "". Real citations are surfaced as // proper Citation chips via QuoteEvent.cite, so leaving these inline only // creates visual stutter. function cleanText(text) { return (text || "") .replace(/\s*\[[QqHhFfYy0-9]+,\s*(unknown|transcript)\]/gi, "") .replace(/[ \t]+\n/g, "\n"); } function* emitText(text) { text = cleanText(text); if (!text) return; if (!inPara) { yield { type: "para_start" }; inPara = true; } // Split on blank lines to honour paragraph breaks the model produces. const segs = text.split(/\n{2,}/); for (let i = 0; i < segs.length; i++) { if (i > 0) { if (inPara) { yield { type: "para_end" }; inPara = false; } yield { type: "para_start" }; inPara = true; } if (segs[i]) yield { type: "token", text: segs[i] }; } } outer: while (true) { const { value, done } = await reader.read(); if (done) break; buf += dec.decode(value, { stream: true }); let idx; while ((idx = buf.indexOf("\n\n")) >= 0) { const chunk = buf.slice(0, idx).trim(); buf = buf.slice(idx + 2); if (!chunk) continue; const data = chunk.startsWith("data:") ? 
chunk.slice(5).trim() : chunk;
let ev;
try { ev = JSON.parse(data); } catch { continue; } // skip malformed frames
if (ev.type === "text") {
  for (const out of emitText(ev.text)) yield out;
} else if (ev.type === "quote") {
  // Inline block quote inside the current paragraph.
  if (!inPara) { yield { type: "para_start" }; inPara = true; }
  yield { type: "quote", text: ev.text, cite: ev.cite || null };
} else if (ev.type === "tool_call") {
  // Close any open paragraph so tool chips render between paragraphs.
  if (inPara) { yield { type: "para_end" }; inPara = false; }
  pendingTools.set(ev.name, { name: ev.name, args: ev.args || {}, ok: false, summary: "running…" });
} else if (ev.type === "tool_result") {
  // Merge the result into its pending call; tolerate a result with no
  // matching tool_call (falls back to a bare {name, args} record).
  const t = pendingTools.get(ev.name) || { name: ev.name, args: {} };
  t.ok = !!ev.ok;
  t.summary = ev.summary || (ev.ok ? "ok" : "error");
  pendingTools.delete(ev.name);
  yield { type: "tool", tool: t };
} else if (ev.type === "error") {
  // Surface the error inline as its own paragraph, then end the stream.
  if (inPara) { yield { type: "para_end" }; inPara = false; }
  yield { type: "para_start" };
  yield { type: "token", text: `[error] ${ev.message}` };
  yield { type: "para_end" };
  yield { type: "done" };
  break outer;
} else if (ev.type === "done") {
  if (inPara) { yield { type: "para_end" }; inPara = false; }
  yield { type: "done" };
  break outer;
}
} } }

// Mock — scripted-but-believable SSE for the seeded PERSISTENT question.
// Anything else gets a graceful "demo agent" event.

// Normalize a cite that may be either a mock {ck, label} object or a plain
// backend string ("Q3FY26, Q&A") into the shape Citation expects: {chunk,
// label, ck}. Returns null if the cite is empty or just "unknown" / "transcript"
// (no useful section info).
function normalizeCite(cite) {
  if (!cite) return null;
  if (typeof cite === "string") {
    const s = cite.trim();
    if (!s || /^(unknown|transcript)$/i.test(s)) return null;
    // "Q3FY26, Q&A" → chunk=Q3FY26, label=Q&A. Tolerate "·" or ":" too.
const m = s.match(/^([^,·:]+)[,·:]\s*(.+)$/);
if (m) return { chunk: m[1].trim(), label: m[2].trim(), ck: null };
// No separator at all → treat the whole string as the chunk id.
return { chunk: s, label: "transcript", ck: null };
}
// {ck, label: "Q3FY26 · Q&A"} (mock shape)
if (cite.label) {
  const parts = String(cite.label).split(/ · | - /);
  return {
    chunk: parts[0] || "",
    label: parts[1] || "transcript",
    ck: cite.ck || null,
  };
}
return null;
}

// Flatten an assistant turn's paragraph chunks into plain text for replay.
// Joins paragraphs with blank lines; keeps text/inline/quote text but drops
// tool chips and citation refs — only what the user actually saw.
function assistantText(turn) {
  if (!turn || !turn.paragraphs) return "";
  return turn.paragraphs
    .map(para => (para || []).map(c => c?.text || "").join(""))
    .map(s => s.trim())
    .filter(Boolean)
    .join("\n\n");
}

// Mock transport: canned event scripts for the seeded PERSISTENT prompts;
// anything else gets a generic "demo agent" reply. Emits the same event
// shapes as realChat so ChatPanel cannot tell the transports apart.
async function* mockChat({ ticker, messages, signal }) {
  const last = messages[messages.length - 1]?.content || "";
  // Guardrail: investment-advice phrasing triggers a refusal turn.
  const refusal = /\b(buy|sell|target price|price target|should i buy|should i sell|fair value)\b/i.test(last);
  const macroMatch = /macro|disappear|disappeared|drop|dropped/i.test(last);
  const assistxMatch = /assistx|customer zero/i.test(last);
  const dodgeMatch = /dodge|refuse|refused|walk back|walked back|margin/i.test(last);
  const sleep = (ms) => new Promise(r => setTimeout(r, ms));
  if (refusal) {
    await sleep(220);
    yield { type: "refusal" };
    yield { type: "token", text: "I don't issue buy / sell calls or price targets — that's not what this dataset is for. " };
    await sleep(180);
    yield { type: "token", text: "The briefs surface what management said and stopped saying; the trade thesis is yours to write."
};
    yield { type: "done" };
    return;
  }

  // PERSISTENT-grounded canned answers
  if (ticker === "PERSISTENT" && macroMatch) {
    // Three staged tool chips, then a two-paragraph narrative answer.
    yield { type: "tool", tool: { name: "get_brief", args: { ticker: "PERSISTENT" }, ok: true, summary: "1 brief, 4 quarters" } };
    await sleep(280);
    yield { type: "tool", tool: { name: "get_topic_trajectory", args: { ticker: "PERSISTENT", topic: "Macroeconomic / geopolitical uncertainty" }, ok: true, summary: "[8, 5, 4, 0]" } };
    await sleep(300);
    yield { type: "tool", tool: { name: "search_passages", args: { ticker: "PERSISTENT", query: "macro tariff DOGE", quarter: "Q3FY26" }, ok: true, summary: "0 hits" } };
    await sleep(260);
    yield { type: "para_start" };
    for (const t of ["Macro went from the dominant defensive frame to absent in one quarter. ", "The trajectory across the four calls is "]) {
      yield { type: "token", text: t };
      await sleep(80); // simulated token latency
    }
    yield { type: "inline", kind: "code", text: "8 → 5 → 4 → 0 mentions" };
    yield { type: "token", text: "." };
    yield { type: "para_end" };
    yield { type: "para_start" };
    yield { type: "token", text: "The peak was Q2FY26, when Sandeep Kalra opened most forward-looking answers with the macro caveat: " };
    yield { type: "quote", text: "the macro remains interesting and it keeps on going through its own challenges. You may have heard the multiple news that keeps on coming up weeks and quarters in the US.", cite: { ck: "PERSISTENT_Q2FY26_qa_macro", label: "Q2FY26 · Q&A" } };
    yield { type: "para_end" };
    yield { type: "para_start" };
    yield { type: "token", text: "By Q3FY26 macro is not mentioned in either prepared remarks or Q&A. The juxtaposition with healthcare YoY growth halving from 33.6% to 7.4% over the same window suggests a deliberate pivot away from defensiveness." };
    yield { type: "para_end" };
    yield { type: "done" };
    return;
  }
  if (ticker === "PERSISTENT" && assistxMatch) {
    yield { type: "tool", tool: { name: "search_passages", args: { ticker: "PERSISTENT", query: "AssistX Customer Zero", quarter: "Q3FY26" }, ok: true, summary: "11 hits · 1500+ words" } };
    await sleep(280);
    yield { type: "para_start" };
    yield { type: "token", text: "Q3FY26 introduced an entire new speaker — Debashis Singh, CIO — and a 1,500+ word block on internal AI usage. " };
    yield { type: "quote", text: "As Customer Zero, our early investment in trusted AI platforms… have allowed us to move AI from pilots to production", cite: { ck: "PERSISTENT_Q3FY26_or_assistx", label: "Q3FY26 · opening remarks" } };
    yield { type: "para_end" };
    yield { type: "para_start" };
    yield { type: "token", text: "The framing is 'Customer Zero' — internal deployment as proof-of-capability for selling externally. Risk: the metrics (PiAssist 83%, ITAssist 3hr→30min) are easy to recite and hard to verify. Worth probing whether AssistX-driven external deals exist yet." };
    yield { type: "para_end" };
    yield { type: "done" };
    return;
  }
  if (ticker === "PERSISTENT" && dodgeMatch) {
    yield { type: "tool", tool: { name: "find_dodged_questions", args: { ticker: "PERSISTENT", quarter: "Q3FY26" }, ok: true, summary: "1 dodge" } };
    await sleep(280);
    yield { type: "para_start" };
    yield { type: "token", text: "The sharpest dodge was Bhavik Mehta's 18-24mo margin tailwind question — explicit refusal to engage. " };
    yield { type: "quote", text: "we are not aspiring now to take it another 200 basis points up. We are happy where we are reaching.", cite: { ck: "PERSISTENT_Q3FY26_qa_margin", label: "Q3FY26 · Q&A" } };
    yield { type: "para_end" };
    yield { type: "done" };
    return;
  }

  // Generic fallback
  await sleep(280);
  yield { type: "para_start" };
  yield { type: "token", text: "Demo agent — only seeded prompts have a scripted answer for this prototype. " };
  yield { type: "token", text: `In production this calls the live MCP tools against ${ticker}'s brief.` };
  yield { type: "para_end" };
  yield { type: "done" };
}

// ----------------------------------------------------------------------------
// Tool chip (compact, inline)
// ----------------------------------------------------------------------------
// Compact chip for one tool invocation; parent owns the expanded state via
// `expanded`/`onToggle`. NOTE(review): the JSX markup inside these render
// returns appears stripped/garbled in this copy of the file — restore it from
// version control before editing the components below.
function ToolChip({ t, expanded, onToggle }) { return ( ); }

// Expanded detail card for one tool invocation: name, args, status, summary.
function ToolDetail({ t }) { return (
{t.name} · MCP tool · stdio {t.ok ? "200 OK" : "running"}
{Object.entries(t.args).map(([k, v]) => (
{k}: {JSON.stringify(v)}
))}
{t.summary}
); }

// ----------------------------------------------------------------------------
// RefusalBanner — for chat refusal turns + standalone
// ----------------------------------------------------------------------------
// Static banner rendered for refusal turns; also exposed standalone (per the
// file header, as window.RefusalBanner). NOTE(review): the JSX element tags in
// this render appear stripped in this copy — restore from VCS before editing.
function RefusalBanner() { return (
[ 403 ]
This tool doesn't issue buy / sell calls or price targets.

Throughline surfaces what management said and what they stopped saying — drift, dodges, guidance moves. The trade thesis is yours to write.

); }

// ----------------------------------------------------------------------------
// ChatPanel — persistent, embeddable, mountable as right-rail or full-width.
// ----------------------------------------------------------------------------
// Props: ticker (chat state resets when it changes), latest (quarter label
// shown in the header), navigate (router callback).
function ChatPanel({ ticker, latest, navigate }) {
  // turns: [{role:"user", text} | {role:"assistant", tools, paragraphs, refusal, error?}]
  const [turns, setTurns] = useState([]);
  const [streaming, setStreaming] = useState(false);
  const [input, setInput] = useState("");
  const [expanded, setExpanded] = useState({}); // tool-chip expand state, keyed "turnIdx-toolIdx"
  const [unavailable, setUnavailable] = useState(false);
  const abortRef = useRef(null);
  const scrollRef = useRef(null);
  const suggestions = window.CCT_SUGGESTIONS?.[ticker] || window.CCT_SUGGESTIONS?.global || [];
  function pushTurn(t) { setTurns(prev => [...prev, t]); }
  // Immutably patch the most recent turn (the streaming assistant skeleton);
  // `fn` mutates a shallow copy, and the stream handlers below always replace
  // (never push into) the nested arrays, so prior state stays untouched.
  function patchLast(fn) { setTurns(prev => { const next = prev.slice(); const last = { ...next[next.length - 1] }; fn(last); next[next.length - 1] = last; return next; }); }
  // Send one user message and stream the assistant reply into state.
  async function send(q) {
    const text = q.trim();
    if (!text || streaming) return;
    setInput("");
    pushTurn({ role: "user", text });
    // assistant turn skeleton
    const assistant = { role: "assistant", tools: [], parts: [], refusal: false, paragraphs: [[]] };
    pushTurn(assistant);
    setStreaming(true);
    abortRef.current = new AbortController();
    try {
      // Replay history for the transport; assistant turns are flattened to
      // the plain text the user actually saw (see assistantText).
      const messages = [
        ...turns.map(t => ({ role: t.role, content: t.role === "user" ?
          (t.text || "") : assistantText(t),
        })),
        { role: "user", content: text },
      ];
      let curPara = 0; // counts para_start events; not otherwise read here
      for await (const ev of chatStream({ ticker, messages, signal: abortRef.current.signal })) {
        if (ev.type === "tool") {
          patchLast(t => { t.tools = [...t.tools, ev.tool]; });
        } else if (ev.type === "refusal") {
          patchLast(t => { t.refusal = true; });
        } else if (ev.type === "para_start") {
          patchLast(t => { t.paragraphs = [...t.paragraphs, []]; });
          curPara++;
        } else if (ev.type === "para_end") { /* no-op */ }
        else if (ev.type === "token") {
          // Append a text segment to the last paragraph; copy every level so
          // React sees new references and re-renders the streaming turn.
          patchLast(t => { const ps = t.paragraphs.map(p => p.slice()); ps[ps.length - 1] = [...ps[ps.length - 1], { kind: "text", text: ev.text }]; t.paragraphs = ps; });
        } else if (ev.type === "inline") {
          patchLast(t => { const ps = t.paragraphs.map(p => p.slice()); ps[ps.length - 1] = [...ps[ps.length - 1], { kind: "code", text: ev.text }]; t.paragraphs = ps; });
        } else if (ev.type === "quote") {
          patchLast(t => { const ps = t.paragraphs.map(p => p.slice()); ps[ps.length - 1] = [...ps[ps.length - 1], { kind: "quote", text: ev.text, cite: ev.cite }]; t.paragraphs = ps; });
        } else if (ev.type === "done") { break; }
      }
    } catch (e) {
      // User-initiated aborts are silent; anything else flips the panel into
      // the "offline" state and annotates the half-built assistant turn.
      if (e.name !== "AbortError") {
        setUnavailable(true);
        patchLast(t => { t.error = "Chat is unavailable right now. The brief above is the durable surface — it doesn't depend on this connection."; });
      }
    } finally {
      setStreaming(false);
      abortRef.current = null;
    }
  }
  function stop() { abortRef.current?.abort(); setStreaming(false); }
  // Abort any in-flight stream on unmount.
  useEffect(() => () => abortRef.current?.abort(), []);
  // Reset chat when ticker changes
  useEffect(() => { setTurns([]); setExpanded({}); setUnavailable(false); abortRef.current?.abort(); }, [ticker]);
  return (
Ask the agent · {ticker}
Concall-grounded · {latest} · refuses price/buy-sell
{turns.length === 0 && (
Suggested questions
{suggestions.map(s => ( ))}
{unavailable && (
[ offline ]
Chat is unavailable right now. The brief on the left is the durable surface.
)}
)} {turns.map((t, i) => (
{t.role === "user" ? "You" : t.refusal ? "Agent · refused" : "Agent"} {t.role === "assistant" && i === turns.length - 1 && streaming && ( · streaming )}
{t.role === "user" ? (
{t.text}
) : (
{t.refusal && } {t.tools && t.tools.length > 0 && (
{t.tools.map((tool, ti) => { const key = `${i}-${ti}`; return ( setExpanded(e => ({ ...e, [key]: !e[key] }))} /> {expanded[key] && } ); })}
)} {t.paragraphs && t.paragraphs.map((p, pi) => p.length === 0 ? null : (

{p.map((seg, si) => { if (seg.kind === "text") return {seg.text}; if (seg.kind === "code") return {seg.text}; if (seg.kind === "quote") { const cite = normalizeCite(seg.cite); return ( "{seg.text}" {cite && ( <> {" "} )} ); } return null; })}

))} {t.error && (
[ offline ]
{t.error}
)} {/* Suggested follow-ups after a refusal */} {t.refusal && (
{suggestions.map(s => ( ))}
)}
)}
))}