// embeddings.mjs
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from "../resource.mjs";
import * as Core from "../core.mjs";
export class Embeddings extends APIResource {
    /**
     * Creates an embedding vector representing the input text.
     *
     * @example
     * ```ts
     * const createEmbeddingResponse =
     *   await client.embeddings.create({
     *     input: 'The quick brown fox jumped over the lazy dog',
     *     model: 'text-embedding-3-small',
     *   });
     * ```
     */
    create(body, options) {
        const hasUserProvidedEncodingFormat = !!body.encoding_format;
        // No encoding_format specified, defaulting to base64 for performance reasons
        // See https://github.com/openai/openai-node/pull/1312
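        // (Rationale, roughly: base64-encoded embeddings produce a much smaller JSON
        // payload than arrays of float literals, so responses transfer and parse faster.)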
        let encoding_format = hasUserProvidedEncodingFormat ? body.encoding_format : 'base64';
        if (hasUserProvidedEncodingFormat) {
            Core.debug('Request', 'User defined encoding_format:', body.encoding_format);
        }
        const response = this._client.post('/embeddings', {
            body: {
                ...body,
                encoding_format: encoding_format,
            },
            ...options,
        });
        // if the user specified an encoding_format, return the response as-is
        if (hasUserProvidedEncodingFormat) {
            return response;
        }
        // at this stage, we are sure the user did not specify an encoding_format
        // and that we defaulted to base64 for performance reasons
        // we are therefore sure the response is base64 encoded, so let's decode it
        // the returned result will be a float32 array, since 'float' is the OpenAI API's default encoding_format
        Core.debug('response', 'Decoding base64 embeddings to float32 array');
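        // _thenUnwrap (as used here) applies the callback to the parsed response body
        // before the APIPromise resolves, so callers receive the decoded embeddings.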
        return response._thenUnwrap((response) => {
            if (response && response.data) {
                response.data.forEach((embeddingBase64Obj) => {
                    const embeddingBase64Str = embeddingBase64Obj.embedding;
                    embeddingBase64Obj.embedding = Core.toFloat32Array(embeddingBase64Str);
                });
            }
            return response;
        });
    }
}
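// Usage sketch (illustrative, not part of the generated file):
//
//   import OpenAI from 'openai';
//   const client = new OpenAI();
//
//   // Default path: the SDK requests base64 under the hood and decodes it,
//   // so `res.data[0].embedding` arrives as an array of numbers.
//   const res = await client.embeddings.create({
//     input: 'The quick brown fox jumped over the lazy dog',
//     model: 'text-embedding-3-small',
//   });
//
//   // Explicit encoding_format: the response is returned as-is, with no decoding.
//   const raw = await client.embeddings.create({
//     input: 'The quick brown fox jumped over the lazy dog',
//     model: 'text-embedding-3-small',
//     encoding_format: 'base64',
//   });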
//# sourceMappingURL=embeddings.mjs.map