import "./context-_sPQqJWv.js";
import "./client-BZVYeBmf.js";
import "./mcp-CzbSsLfc.js";
import "./do-oauth-client-provider-B-ryFIPr.js";
import "./index-CyDpAVHZ.js";
import "./ai-types-U8lYA0o8.js";
import { n as AgentContext, t as Agent } from "./index-B6XHf8p0.js";
import {
JSONSchema7,
StreamTextOnFinishCallback,
Tool,
ToolSet,
UIMessage
} from "ai";
//#region src/ai-chat-agent.d.ts
/**
 * Schema for a client-defined tool sent from the browser.
 * These tools are executed on the client, not the server.
 *
 * Note: Uses `parameters` (JSONSchema7) rather than AI SDK's `inputSchema` (FlexibleSchema)
 * because this is the wire format sent over the network. Zod schemas cannot be serialized,
 * so the client must send plain JSON Schema.
 */
type ClientToolSchema = {
  /** Unique name for the tool; used to route tool calls back to the client implementation. */
  name: string;
  /** Human-readable description of what the tool does. */
  description?: Tool["description"];
  /** JSON Schema defining the tool's input parameters. May be omitted for tools that take no input. */
  parameters?: JSONSchema7;
};
/**
 * Options passed to the `onChatMessage` handler.
 */
type OnChatMessageOptions = {
  /** AbortSignal for cancelling the request. */
  abortSignal?: AbortSignal;
  /**
   * Tool schemas sent from the client for dynamic tool registration.
   * These represent tools that will be executed on the client side
   * (they carry no server-side `execute` function).
   * Use `createToolsFromClientSchemas()` to convert these to AI SDK tool format.
   */
  clientTools?: ClientToolSchema[];
};
/**
 * Converts client tool schemas to AI SDK tool format.
 *
 * These tools have no `execute` function - when the AI model calls them,
 * the tool call is sent back to the client for execution.
 *
 * @param clientTools - Array of tool schemas from the client; optional
 * @returns Record of AI SDK tools (`ToolSet`) that can be spread into your tools object
 */
declare function createToolsFromClientSchemas(
  clientTools?: ClientToolSchema[]
): ToolSet;
/**
 * Extension of Agent with built-in chat capabilities:
 * message persistence, resumable streaming, client-side tool execution,
 * and per-message request cancellation.
 * @template Env Environment type containing bindings
 * @template State Agent state type (inherited from Agent)
 */
declare class AIChatAgent<Env = unknown, State = unknown> extends Agent<
  Env,
  State
> {
  /**
   * Map of message `id`s to `AbortController`s
   * useful to propagate request cancellation signals for any external calls made by the agent
   */
  private _chatMessageAbortControllers;
  /**
   * Currently active stream ID for resumable streaming.
   * Stored in memory for quick access; persisted in stream_metadata table.
   * @internal Protected for testing purposes.
   */
  protected _activeStreamId: string | null;
  /**
   * Request ID associated with the active stream.
   * @internal Protected for testing purposes.
   */
  protected _activeRequestId: string | null;
  /**
   * The message currently being streamed. Used to apply tool results
   * before the message is persisted.
   * @internal
   */
  private _streamingMessage;
  /**
   * Promise that resolves when the current stream completes.
   * Used to wait for message persistence before continuing after tool results.
   * @internal
   */
  private _streamCompletionPromise;
  /** Resolver for `_streamCompletionPromise`; invoked when the stream finishes. @internal */
  private _streamCompletionResolve;
  /**
   * Current chunk index for the active stream
   * (monotonic position used when replaying chunks on resume).
   */
  private _streamChunkIndex;
  /**
   * Buffer for stream chunks pending write to SQLite.
   * Chunks are batched and flushed when buffer reaches CHUNK_BUFFER_SIZE.
   */
  private _chunkBuffer;
  /**
   * Lock to prevent concurrent flush operations
   */
  private _isFlushingChunks;
  /**
   * Timestamp of the last cleanup operation for old streams
   */
  private _lastCleanupTime;
  /** Array of chat messages for the current conversation */
  messages: UIMessage[];
  /**
   * @param ctx - Durable-object agent context passed through to the base Agent
   * @param env - Environment bindings for this agent
   */
  constructor(ctx: AgentContext, env: Env);
  /**
   * Restore active stream state if the agent was restarted during streaming.
   * Called during construction to recover any interrupted streams.
   * Validates stream freshness to avoid sending stale resume notifications.
   * @internal Protected for testing purposes.
   */
  protected _restoreActiveStream(): void;
  /**
   * Notify a connection about an active stream that can be resumed.
   * The client should respond with CF_AGENT_STREAM_RESUME_ACK to receive chunks.
   * Uses in-memory state for request ID - no extra DB lookup needed.
   * @param connection - The WebSocket connection to notify
   */
  private _notifyStreamResuming;
  /**
   * Send stream chunks to a connection after receiving ACK.
   * @param connection - The WebSocket connection
   * @param streamId - The stream to replay
   * @param requestId - The original request ID
   */
  private _sendStreamChunks;
  /**
   * Buffer a stream chunk for batch write to SQLite.
   * @param streamId - The stream this chunk belongs to
   * @param body - The serialized chunk body
   * @internal Protected for testing purposes.
   */
  protected _storeStreamChunk(streamId: string, body: string): void;
  /**
   * Flush buffered chunks to SQLite in a single batch.
   * Uses a lock to prevent concurrent flush operations.
   * @internal Protected for testing purposes.
   */
  protected _flushChunkBuffer(): void;
  /**
   * Start tracking a new stream for resumable streaming.
   * Creates metadata entry in SQLite and sets up tracking state.
   * @param requestId - The unique ID of the chat request
   * @returns The generated stream ID
   * @internal Protected for testing purposes.
   */
  protected _startStream(requestId: string): string;
  /**
   * Mark a stream as completed and flush any pending chunks.
   * @param streamId - The stream to mark as completed
   * @internal Protected for testing purposes.
   */
  protected _completeStream(streamId: string): void;
  /**
   * Clean up old completed streams if enough time has passed since last cleanup.
   * This prevents database growth while avoiding cleanup overhead on every stream completion.
   */
  private _maybeCleanupOldStreams;
  /** Broadcast a chat message to connected clients. NOTE(review): implementation not visible here — confirm broadcast scope (all connections vs. excluding sender). */
  private _broadcastChatMessage;
  /** Load persisted conversation messages from the agent's SQLite storage into memory. */
  private _loadMessagesFromDb;
  /**
   * Handle an incoming HTTP request to this agent.
   * @param request - The incoming request
   * @returns The HTTP response
   */
  onRequest(request: Request): Promise<Response>;
  /** Wrapper that runs a chat operation and converts thrown errors into a handled result — presumably an error Response; verify against implementation. */
  private _tryCatchChat;
  /**
   * Handle incoming chat messages and generate a response
   * @param onFinish Callback to be called when the response is finished
   * @param options Options including abort signal and client-defined tools
   * @returns Response to send to the client or undefined
   */
  onChatMessage(
    onFinish: StreamTextOnFinishCallback<ToolSet>,
    options?: OnChatMessageOptions
  ): Promise<Response | undefined>;
  /**
   * Save messages on the server side
   * @param messages Chat messages to save
   */
  saveMessages(messages: UIMessage[]): Promise<void>;
  /**
   * Persist messages to storage.
   * @param messages - Chat messages to persist
   * @param excludeBroadcastIds - Message ids that should not be re-broadcast to clients
   */
  persistMessages(
    messages: UIMessage[],
    excludeBroadcastIds?: string[]
  ): Promise<void>;
  /**
   * Merges incoming messages with existing server state.
   * This preserves tool outputs that the server has (via _applyToolResult)
   * but the client doesn't have yet.
   *
   * @param incomingMessages - Messages from the client
   * @returns Messages with server's tool outputs preserved
   */
  private _mergeIncomingWithServerState;
  /**
   * Resolves a message for persistence, handling tool result merging.
   * If the message contains tool parts with output-available state, checks if there's
   * an existing message with the same toolCallId that should be updated instead of
   * creating a duplicate. This prevents the "Duplicate item found" error from OpenAI
   * when client-side tool results arrive in a new request.
   *
   * @param message - The message to potentially merge
   * @returns The message with the correct ID (either original or merged)
   */
  private _resolveMessageForToolMerge;
  /**
   * Finds an existing assistant message that contains a tool part with the given toolCallId.
   * Used to detect when a tool result should update an existing message rather than
   * creating a new one.
   *
   * @param toolCallId - The tool call ID to search for
   * @returns The existing message if found, undefined otherwise
   */
  private _findMessageByToolCallId;
  /**
   * Sanitizes a message for persistence by removing ephemeral provider-specific
   * data that should not be stored or sent back in subsequent requests.
   *
   * This handles two issues with the OpenAI Responses API:
   *
   * 1. **Duplicate item IDs**: The AI SDK's @ai-sdk/openai provider (v2.0.x+)
   * defaults to using OpenAI's Responses API which assigns unique itemIds
   * to each message part. When these IDs are persisted and sent back,
   * OpenAI rejects them as duplicates.
   *
   * 2. **Empty reasoning parts**: OpenAI may return reasoning parts with empty
   * text and encrypted content. These cause "Non-OpenAI reasoning parts are
   * not supported" warnings when sent back via convertToModelMessages().
   *
   * @param message - The message to sanitize
   * @returns A new message with ephemeral provider data removed
   */
  private _sanitizeMessageForPersistence;
  /**
   * Helper to strip OpenAI-specific ephemeral fields from a metadata object.
   * Removes itemId and reasoningEncryptedContent while preserving other fields.
   */
  private _stripOpenAIMetadata;
  /**
   * Applies a tool result to an existing assistant message.
   * This is used when the client sends CF_AGENT_TOOL_RESULT for client-side tools.
   * The server is the source of truth, so we update the message here and broadcast
   * the update to all clients.
   *
   * @param toolCallId - The tool call ID this result is for
   * @param toolName - The name of the tool
   * @param output - The output from the tool execution
   * @returns true if the result was applied, false if the message was not found
   */
  private _applyToolResult;
  /** Produce and stream the reply for a chat request. NOTE(review): implementation not visible — confirm it drives onChatMessage and stream persistence. */
  private _reply;
  /**
   * Mark a stream as errored and clean up state.
   * @param streamId - The stream to mark as errored
   * @internal Protected for testing purposes.
   */
  protected _markStreamError(streamId: string): void;
  /**
   * For the given message id, look up its associated AbortController
   * If the AbortController does not exist, create and store one in memory
   *
   * returns the AbortSignal associated with the AbortController
   */
  private _getAbortSignal;
  /**
   * Remove an abort controller from the cache of pending message responses
   */
  private _removeAbortController;
  /**
   * Propagate an abort signal for any requests associated with the given message id
   */
  private _cancelChatRequest;
  /**
   * Abort all pending requests and clear the cache of AbortControllers
   */
  private _destroyAbortControllers;
  /**
   * When the DO is destroyed, cancel all pending requests and clean up resources
   */
  destroy(): Promise<void>;
}
//#endregion
// Public API surface of this declaration bundle.
export {
  AIChatAgent,
  ClientToolSchema,
  OnChatMessageOptions,
  createToolsFromClientSchemas
};
//# sourceMappingURL=ai-chat-agent.d.ts.map