Skip to main content
Glama

Gotas Commerce MCP Server

batches.js (3.85 kB)
"use strict"; // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. Object.defineProperty(exports, "__esModule", { value: true }); exports.BetaMessageBatchesPage = exports.Batches = void 0; const resource_1 = require("../../../resource.js"); const core_1 = require("../../../core.js"); const pagination_1 = require("../../../pagination.js"); const jsonl_1 = require("../../../internal/decoders/jsonl.js"); const error_1 = require("../../../error.js"); class Batches extends resource_1.APIResource { /** * Send a batch of Message creation requests. * * The Message Batches API can be used to process multiple Messages API requests at * once. Once a Message Batch is created, it begins processing immediately. Batches * can take up to 24 hours to complete. */ create(params, options) { const { betas, ...body } = params; return this._client.post('/v1/messages/batches?beta=true', { body, ...options, headers: { 'anthropic-beta': [...(betas ?? []), 'message-batches-2024-09-24'].toString(), ...options?.headers, }, }); } retrieve(messageBatchId, params = {}, options) { if ((0, core_1.isRequestOptions)(params)) { return this.retrieve(messageBatchId, {}, params); } const { betas } = params; return this._client.get(`/v1/messages/batches/${messageBatchId}?beta=true`, { ...options, headers: { 'anthropic-beta': [...(betas ?? []), 'message-batches-2024-09-24'].toString(), ...options?.headers, }, }); } list(params = {}, options) { if ((0, core_1.isRequestOptions)(params)) { return this.list({}, params); } const { betas, ...query } = params; return this._client.getAPIList('/v1/messages/batches?beta=true', BetaMessageBatchesPage, { query, ...options, headers: { 'anthropic-beta': [...(betas ?? 
[]), 'message-batches-2024-09-24'].toString(), ...options?.headers, }, }); } cancel(messageBatchId, params = {}, options) { if ((0, core_1.isRequestOptions)(params)) { return this.cancel(messageBatchId, {}, params); } const { betas } = params; return this._client.post(`/v1/messages/batches/${messageBatchId}/cancel?beta=true`, { ...options, headers: { 'anthropic-beta': [...(betas ?? []), 'message-batches-2024-09-24'].toString(), ...options?.headers, }, }); } async results(messageBatchId, params = {}, options) { if ((0, core_1.isRequestOptions)(params)) { return this.results(messageBatchId, {}, params); } const batch = await this.retrieve(messageBatchId); if (!batch.results_url) { throw new error_1.AnthropicError(`No batch \`results_url\`; Has it finished processing? ${batch.processing_status} - ${batch.id}`); } const { betas } = params; return this._client .get(batch.results_url, { ...options, headers: { 'anthropic-beta': [...(betas ?? []), 'message-batches-2024-09-24'].toString(), ...options?.headers, }, __binaryResponse: true, }) ._thenUnwrap((_, props) => jsonl_1.JSONLDecoder.fromResponse(props.response, props.controller)); } } exports.Batches = Batches; class BetaMessageBatchesPage extends pagination_1.Page { } exports.BetaMessageBatchesPage = BetaMessageBatchesPage; Batches.BetaMessageBatchesPage = BetaMessageBatchesPage; //# sourceMappingURL=batches.js.map

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/caiovicentino/mcpGOTAS'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.