Skip to main content
GET
/
v1
/
models
List Models
curl --request GET \
  --url https://api.example.com/v1/models
{
  "object": "<string>",
  "data": [
    {
      "id": "<string>",
      "provider": "<string>",
      "created_at": {},
      "display_name": "<string>",
      "description": "<string>",
      "version": "<string>",
      "capabilities": {
        "text": true,
        "vision": true,
        "audio": true,
        "image_generation": true,
        "tools": true,
        "streaming": true,
        "structured_output": true,
        "thinking": true,
        "input_token_limit": {},
        "output_token_limit": {}
      },
      "defaults": {
        "temperature": {},
        "top_p": {},
        "top_k": {},
        "max_output_tokens": {}
      },
      "provider_info": {},
      "provider_declared_generation_methods": {}
    }
  ]
}

Overview

Retrieves the complete list of models available to your organization, including models from OpenAI, Anthropic, Google, xAI, Mistral, Groq, Fireworks, and DeepSeek.

Method Signature

func (r *ModelService) List(
    ctx context.Context,
    opts ...option.RequestOption,
) (*ListModelsResponse, error)

Response Fields

object
string
Object type, always list
data
[]Model
required
Array of available models

Code Examples

List All Models

package main

import (
    "context"
    "fmt"
    "log"

    dedalus "github.com/dedalus-labs/dedalus-sdk-go"
    "github.com/dedalus-labs/dedalus-sdk-go/option"
)

func main() {
    client := dedalus.NewClient(
        option.WithAPIKey("your-api-key"),
    )

    ctx := context.Background()
    
    response, err := client.Models.List(ctx)
    if err != nil {
        log.Fatal(err)
    }

    fmt.Printf("Found %d models\n\n", len(response.Data))
    
    for _, model := range response.Data {
        fmt.Printf("ID: %s\n", model.ID)
        fmt.Printf("Provider: %s\n", model.Provider)
        if model.DisplayName != "" {
            fmt.Printf("Name: %s\n", model.DisplayName)
        }
        fmt.Println()
    }
}

Filter by Provider

response, err := client.Models.List(ctx)
if err != nil {
    log.Fatal(err)
}

// Filter OpenAI models
openaiModels := []dedalus.Model{}
for _, model := range response.Data {
    if model.Provider == dedalus.ModelProviderOpenAI {
        openaiModels = append(openaiModels, model)
    }
}

fmt.Printf("OpenAI models: %d\n", len(openaiModels))
for _, model := range openaiModels {
    fmt.Printf("  - %s\n", model.ID)
}

Find Models by Capability

response, err := client.Models.List(ctx)
if err != nil {
    log.Fatal(err)
}

// Find models that support vision
visionModels := []dedalus.Model{}
for _, model := range response.Data {
    if model.Capabilities.Vision {
        visionModels = append(visionModels, model)
    }
}

fmt.Printf("Vision-capable models: %d\n", len(visionModels))
for _, model := range visionModels {
    fmt.Printf("  - %s (%s)\n", model.ID, model.Provider)
}

Display Model Capabilities

response, err := client.Models.List(ctx)
if err != nil {
    log.Fatal(err)
}

for _, model := range response.Data {
    fmt.Printf("\nModel: %s\n", model.ID)
    
    if model.DisplayName != "" {
        fmt.Printf("Name: %s\n", model.DisplayName)
    }
    
    fmt.Println("Capabilities:")
    if model.Capabilities.Text {
        fmt.Println("  ✓ Text generation")
    }
    if model.Capabilities.Vision {
        fmt.Println("  ✓ Vision (image understanding)")
    }
    if model.Capabilities.Audio {
        fmt.Println("  ✓ Audio processing")
    }
    if model.Capabilities.ImageGeneration {
        fmt.Println("  ✓ Image generation")
    }
    if model.Capabilities.Tools {
        fmt.Println("  ✓ Function/tool calling")
    }
    if model.Capabilities.Streaming {
        fmt.Println("  ✓ Streaming")
    }
    if model.Capabilities.StructuredOutput {
        fmt.Println("  ✓ Structured output")
    }
    if model.Capabilities.Thinking {
        fmt.Println("  ✓ Extended thinking")
    }
    
    if model.Capabilities.InputTokenLimit > 0 {
        fmt.Printf("  Max input tokens: %d\n", model.Capabilities.InputTokenLimit)
    }
    if model.Capabilities.OutputTokenLimit > 0 {
        fmt.Printf("  Max output tokens: %d\n", model.Capabilities.OutputTokenLimit)
    }
}

Group Models by Provider

response, err := client.Models.List(ctx)
if err != nil {
    log.Fatal(err)
}

// Group by provider
modelsByProvider := make(map[dedalus.ModelProvider][]dedalus.Model)
for _, model := range response.Data {
    modelsByProvider[model.Provider] = append(modelsByProvider[model.Provider], model)
}

// Display grouped models
// NOTE(review): Go map iteration order is randomized, so providers
// print in a different order on each run. Sort the provider keys
// first if deterministic output matters.
for provider, models := range modelsByProvider {
    fmt.Printf("\n%s (%d models):\n", provider, len(models))
    for _, model := range models {
        fmt.Printf("  - %s\n", model.ID)
    }
}

Find Models for Specific Use Case

response, err := client.Models.List(ctx)
if err != nil {
    log.Fatal(err)
}

// Find models suitable for multimodal chat (text + vision + tools)
fmt.Println("Multimodal chat models (text + vision + tools):")

for _, model := range response.Data {
    if model.Capabilities.Text && 
       model.Capabilities.Vision && 
       model.Capabilities.Tools {
        fmt.Printf("  - %s\n", model.ID)
        if model.Description != "" {
            fmt.Printf("    %s\n", model.Description)
        }
    }
}

Check Model Context Limits

response, err := client.Models.List(ctx)
if err != nil {
    log.Fatal(err)
}

// Find models with large context windows (>100k tokens)
fmt.Println("\nModels with large context windows (>100k tokens):")

for _, model := range response.Data {
    if model.Capabilities.InputTokenLimit > 100000 {
        fmt.Printf("  - %s: %d tokens\n", 
            model.ID, 
            model.Capabilities.InputTokenLimit)
    }
}

Export Model Information

// The snippet writes a file, so it needs "os" in addition to
// "encoding/json" (the original import was missing it).
import (
    "encoding/json"
    "os"
)

response, err := client.Models.List(ctx)
if err != nil {
    log.Fatal(err)
}

// Serialize the model list with two-space indentation.
jsonData, err := json.MarshalIndent(response.Data, "", "  ")
if err != nil {
    log.Fatal(err)
}

// 0644: world-readable export file.
err = os.WriteFile("models.json", jsonData, 0644)
if err != nil {
    log.Fatal(err)
}

fmt.Println("Model information exported to models.json")

Supported Providers

The API provides models from the following providers:
  • OpenAI - GPT-4, GPT-3.5, DALL-E, Whisper, TTS
  • Anthropic - Claude 3 family (Opus, Sonnet, Haiku)
  • Google - Gemini models
  • xAI - Grok models
  • Mistral - Mistral and Mixtral models
  • Groq - High-speed inference models
  • Fireworks - Various open-source models
  • DeepSeek - DeepSeek models

Model Capabilities

Models can have the following capabilities:
  • Text: Text generation and understanding
  • Vision: Image understanding and analysis
  • Audio: Audio processing (speech, transcription)
  • Image Generation: Creating images from text
  • Tools: Function and tool calling
  • Streaming: Real-time response streaming
  • Structured Output: JSON schema compliance
  • Thinking: Extended reasoning (o1 models)

Use Cases

  • Discovering available models for your use case
  • Finding models with specific capabilities
  • Comparing token limits across models
  • Listing models by provider
  • Building dynamic model selection UIs
  • Checking model availability before making requests

Best Practices

  1. Cache Results: List models periodically and cache the results
  2. Capability Checking: Verify model capabilities before use
  3. Provider Selection: Choose providers based on your requirements
  4. Token Limits: Consider input/output limits for your use case
  5. Model Updates: Refresh model list regularly to discover new models

Build docs developers (and LLMs) love