Skip to main content
POST
/
v1
/
embeddings
Create Embeddings
curl --request POST \
  --url https://api.example.com/v1/embeddings
{
  "object": "<string>",
  "model": "<string>",
  "data": [
    {
      "object": "<string>",
      "index": {},
      "embedding": {}
    }
  ],
  "usage": {
    "prompt_tokens": {},
    "total_tokens": {}
  }
}

Overview

Create embeddings using the configured provider. Converts text into dense vector representations suitable for semantic search, clustering, and other NLP tasks.

Method Signature

// New creates embeddings for the given input using the configured
// provider. It issues a POST to /v1/embeddings and returns the parsed
// response, or a non-nil error if the request fails.
func (r *EmbeddingService) New(
    ctx context.Context, // request-scoped context for cancellation and deadlines
    body EmbeddingNewParams, // request body: input, model, and optional settings
    opts ...option.RequestOption, // per-call overrides (headers, retries, etc.)
) (*CreateEmbeddingResponse, error)

Request Parameters

input
string | []string | []int | [][]int
required
Input text to embed. Can be:
  • A single string
  • An array of strings (batch processing)
  • An array of token IDs
  • An array of token ID arrays
Maximum 2048 items per request. Individual inputs cannot exceed 8192 tokens. Total tokens across all inputs cannot exceed 300,000.
model
string
required
ID of the embedding model to use. Examples:
  • openai/text-embedding-ada-002
  • openai/text-embedding-3-small
  • openai/text-embedding-3-large
encoding_format
string
default:"float"
Format to return embeddings in. Options:
  • float - Array of floating point numbers
  • base64 - Base64-encoded string
dimensions
int64
Number of dimensions for the output embeddings. Only supported in text-embedding-3 and later models. Allows you to reduce embedding dimensions for smaller vector sizes.
user
string
Unique identifier for the end-user, used for abuse monitoring.

Response Fields

object
string
Object type; always "list"
model
string
required
The model used for generating embeddings
data
[]EmbeddingData
required
Array of embedding objects
usage
map[string]int64
required
Token usage statistics

Code Examples

Single Text Embedding

// Example: generate an embedding for a single input string.
package main

import (
    "context"
    "fmt"
    "log"

    dedalus "github.com/dedalus-labs/dedalus-sdk-go"
    "github.com/dedalus-labs/dedalus-sdk-go/option"
)

func main() {
    // Construct the client once; it is reusable across requests.
    c := dedalus.NewClient(
        option.WithAPIKey("your-api-key"),
    )

    // Build the request: one input string plus the embedding model ID.
    params := dedalus.EmbeddingNewParams{
        CreateEmbeddingRequest: dedalus.CreateEmbeddingRequestParam{
            Input: dedalus.F(dedalus.CreateEmbeddingRequestInputUnionParam(
                dedalus.String("The quick brown fox jumps over the lazy dog"),
            )),
            Model: dedalus.F(dedalus.CreateEmbeddingRequestModel(
                "openai/text-embedding-3-small",
            )),
        },
    }

    res, err := c.Embeddings.New(context.Background(), params)
    if err != nil {
        log.Fatal(err)
    }

    // res.Data holds one embedding per input; we sent a single string.
    fmt.Printf("Generated %d-dimensional embedding\n", len(res.Data[0].Embedding))
}

Batch Embeddings

// Batch example: embed several texts in a single request — cheaper and
// faster than issuing one request per text.
texts := []string{
    "Machine learning is a subset of artificial intelligence",
    "Natural language processing enables computers to understand text",
    "Deep learning uses neural networks with multiple layers",
}

response, err := client.Embeddings.New(ctx, dedalus.EmbeddingNewParams{
    CreateEmbeddingRequest: dedalus.CreateEmbeddingRequestParam{
        Input: dedalus.F(dedalus.CreateEmbeddingRequestInputUnionParam(
            dedalus.CreateEmbeddingRequestInputCreateEmbeddingRequestInputArrayParam(
                texts,
            ),
        )),
        Model: dedalus.F(dedalus.CreateEmbeddingRequestModel(
            "openai/text-embedding-3-small",
        )),
    },
})

if err != nil {
    log.Fatal(err)
}

// One result per input. Use the blank identifier for the loop index:
// the original `for i, embedding := range ...` declared `i` without
// using it, which does not compile in Go. embedding.Index ties each
// result back to its position in the input slice.
for _, embedding := range response.Data {
    fmt.Printf("Embedding %d: %d dimensions\n", embedding.Index, len(embedding.Embedding))
}

Custom Dimensions

response, err := client.Embeddings.New(ctx, dedalus.EmbeddingNewParams{
    CreateEmbeddingRequest: dedalus.CreateEmbeddingRequestParam{
        Input: dedalus.F(dedalus.CreateEmbeddingRequestInputUnionParam(
            dedalus.String("Sample text for embedding"),
        )),
        Model: dedalus.F(dedalus.CreateEmbeddingRequestModel(
            "openai/text-embedding-3-large",
        )),
        Dimensions: dedalus.F(int64(256)), // Reduce from default 3072 to 256
    },
})

Base64 Encoding

// Request base64-encoded embeddings instead of float arrays — a more
// compact wire format; decode client-side as needed.
response, err := client.Embeddings.New(ctx, dedalus.EmbeddingNewParams{
    CreateEmbeddingRequest: dedalus.CreateEmbeddingRequestParam{
        Input: dedalus.F(dedalus.CreateEmbeddingRequestInputUnionParam(
            dedalus.String("Sample text"),
        )),
        Model: dedalus.F(dedalus.CreateEmbeddingRequestModel(
            "openai/text-embedding-3-small",
        )),
        EncodingFormat: dedalus.F(dedalus.CreateEmbeddingRequestEncodingFormatBase64),
    },
})

if err != nil {
    log.Fatal(err)
}

// With base64 encoding the embedding is returned as a string. Use the
// comma-ok form of the type assertion: the original single-value
// assertion `.(string)` panics if the payload is not a string.
base64Embedding, ok := response.Data[0].Embedding.(string)
if !ok {
    log.Fatalf("expected base64 string embedding, got %T", response.Data[0].Embedding)
}

Supported Models

  • openai/text-embedding-ada-002 - Most cost-effective, 1536 dimensions
  • openai/text-embedding-3-small - Better performance, 1536 dimensions (configurable)
  • openai/text-embedding-3-large - Best performance, 3072 dimensions (configurable)

Use Cases

  • Semantic Search: Find similar documents or passages
  • Clustering: Group similar texts together
  • Recommendations: Find related content
  • Classification: Train classifiers on embedding features
  • Anomaly Detection: Identify outliers in text data

Best Practices

  1. Batch Processing: Process multiple texts in a single request for better performance
  2. Token Limits: Keep individual inputs under 8192 tokens
  3. Dimension Reduction: Use the dimensions parameter to reduce vector size for faster operations
  4. Model Selection: Choose text-embedding-3-small for most use cases, 3-large for maximum quality

Build docs developers (and LLMs) love