Documentation
¶
Index ¶
Constants ¶
View Source
// Anthropic provider identifier and the Claude model IDs it serves.
const (
	ProviderAnthropic ModelProvider = "anthropic"

	// Models
	Claude35Sonnet ModelID = "claude-3.5-sonnet"
	Claude3Haiku   ModelID = "claude-3-haiku"
	Claude37Sonnet ModelID = "claude-3.7-sonnet"
	Claude35Haiku  ModelID = "claude-3.5-haiku"
	Claude3Opus    ModelID = "claude-3-opus"
)
View Source
// Google Gemini provider identifier and its model IDs.
const (
	ProviderGemini ModelProvider = "gemini"

	// Models
	Gemini25Flash     ModelID = "gemini-2.5-flash"
	Gemini25          ModelID = "gemini-2.5"
	Gemini20Flash     ModelID = "gemini-2.0-flash"
	Gemini20FlashLite ModelID = "gemini-2.0-flash-lite"
)
View Source
// Groq provider identifier and its hosted model IDs.
const (
	ProviderGROQ ModelProvider = "groq"

	// GROQ
	QWENQwq ModelID = "qwen-qwq"

	// GROQ preview models
	Llama4Scout               ModelID = "meta-llama/llama-4-scout-17b-16e-instruct"
	Llama4Maverick            ModelID = "meta-llama/llama-4-maverick-17b-128e-instruct"
	Llama3_3_70BVersatile     ModelID = "llama-3.3-70b-versatile"
	DeepseekR1DistillLlama70b ModelID = "deepseek-r1-distill-llama-70b"
)
View Source
// OpenAI provider identifier and its model IDs.
const (
	ProviderOpenAI ModelProvider = "openai"

	GPT41        ModelID = "gpt-4.1"
	GPT41Mini    ModelID = "gpt-4.1-mini"
	GPT41Nano    ModelID = "gpt-4.1-nano"
	GPT45Preview ModelID = "gpt-4.5-preview"
	GPT4o        ModelID = "gpt-4o"
	GPT4oMini    ModelID = "gpt-4o-mini"
	O1           ModelID = "o1"
	O1Pro        ModelID = "o1-pro"
	O1Mini       ModelID = "o1-mini"
	O3           ModelID = "o3"
	O3Mini       ModelID = "o3-mini"
	O4Mini       ModelID = "o4-mini"
)
View Source
// OpenRouter provider identifier and its model IDs. Each ID is the
// upstream model's ID namespaced with an "openrouter." prefix.
const (
	ProviderOpenRouter ModelProvider = "openrouter"

	OpenRouterGPT41          ModelID = "openrouter.gpt-4.1"
	OpenRouterGPT41Mini      ModelID = "openrouter.gpt-4.1-mini"
	OpenRouterGPT41Nano      ModelID = "openrouter.gpt-4.1-nano"
	OpenRouterGPT45Preview   ModelID = "openrouter.gpt-4.5-preview"
	OpenRouterGPT4o          ModelID = "openrouter.gpt-4o"
	OpenRouterGPT4oMini      ModelID = "openrouter.gpt-4o-mini"
	OpenRouterO1             ModelID = "openrouter.o1"
	OpenRouterO1Pro          ModelID = "openrouter.o1-pro"
	OpenRouterO1Mini         ModelID = "openrouter.o1-mini"
	OpenRouterO3             ModelID = "openrouter.o3"
	OpenRouterO3Mini         ModelID = "openrouter.o3-mini"
	OpenRouterO4Mini         ModelID = "openrouter.o4-mini"
	OpenRouterGemini25Flash  ModelID = "openrouter.gemini-2.5-flash"
	OpenRouterGemini25       ModelID = "openrouter.gemini-2.5"
	OpenRouterClaude35Sonnet ModelID = "openrouter.claude-3.5-sonnet"
	OpenRouterClaude3Haiku   ModelID = "openrouter.claude-3-haiku"
	OpenRouterClaude37Sonnet ModelID = "openrouter.claude-3.7-sonnet"
	OpenRouterClaude35Haiku  ModelID = "openrouter.claude-3.5-haiku"
	OpenRouterClaude3Opus    ModelID = "openrouter.claude-3-opus"
)
Variables ¶
View Source
// AnthropicModels maps Anthropic model IDs to their metadata and pricing
// (USD per 1M tokens).
// https://docs.anthropic.com/en/docs/about-claude/models/all-models
var AnthropicModels = map[ModelID]Model{
	Claude35Sonnet: {
		ID:                 Claude35Sonnet,
		Name:               "Claude 3.5 Sonnet",
		Provider:           ProviderAnthropic,
		APIModel:           "claude-3-5-sonnet-latest",
		CostPer1MIn:        3.0,
		CostPer1MInCached:  3.75,
		CostPer1MOutCached: 0.30,
		CostPer1MOut:       15.0,
		ContextWindow:      200000,
		DefaultMaxTokens:   5000,
	},
	Claude3Haiku: {
		ID:                 Claude3Haiku,
		Name:               "Claude 3 Haiku",
		Provider:           ProviderAnthropic,
		APIModel:           "claude-3-haiku-20240307", // pinned snapshot; no "-latest" alias exists for 3 Haiku
		CostPer1MIn:        0.25,
		CostPer1MInCached:  0.30,
		CostPer1MOutCached: 0.03,
		CostPer1MOut:       1.25,
		ContextWindow:      200000,
		DefaultMaxTokens:   4096,
	},
	Claude37Sonnet: {
		ID:                 Claude37Sonnet,
		Name:               "Claude 3.7 Sonnet",
		Provider:           ProviderAnthropic,
		APIModel:           "claude-3-7-sonnet-latest",
		CostPer1MIn:        3.0,
		CostPer1MInCached:  3.75,
		CostPer1MOutCached: 0.30,
		CostPer1MOut:       15.0,
		ContextWindow:      200000,
		DefaultMaxTokens:   50000,
		CanReason:          true, // only extended-thinking-capable Claude in this table
	},
	Claude35Haiku: {
		ID:                 Claude35Haiku,
		Name:               "Claude 3.5 Haiku",
		Provider:           ProviderAnthropic,
		APIModel:           "claude-3-5-haiku-latest",
		CostPer1MIn:        0.80,
		CostPer1MInCached:  1.0,
		CostPer1MOutCached: 0.08,
		CostPer1MOut:       4.0,
		ContextWindow:      200000,
		DefaultMaxTokens:   4096,
	},
	Claude3Opus: {
		ID:                 Claude3Opus,
		Name:               "Claude 3 Opus",
		Provider:           ProviderAnthropic,
		APIModel:           "claude-3-opus-latest",
		CostPer1MIn:        15.0,
		CostPer1MInCached:  18.75,
		CostPer1MOutCached: 1.50,
		CostPer1MOut:       75.0,
		ContextWindow:      200000,
		DefaultMaxTokens:   4096,
	},
}
https://docs.anthropic.com/en/docs/about-claude/models/all-models
View Source
// AzureModels maps Azure OpenAI deployment IDs to their metadata.
// Pricing and limits mirror the corresponding OpenAIModels entries so the
// two tables cannot drift independently; only the ID, display name,
// provider, and deployment (APIModel) names differ.
// NOTE(review): there is no AzureO1Pro entry, matching the Azure const
// block, which also omits it — presumably o1-pro is not offered on Azure;
// confirm.
var AzureModels = map[ModelID]Model{
	AzureGPT41: {
		ID:                 AzureGPT41,
		Name:               "Azure OpenAI – GPT 4.1",
		Provider:           ProviderAzure,
		APIModel:           "gpt-4.1",
		CostPer1MIn:        OpenAIModels[GPT41].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT41].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT41].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT41].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT41].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT41].DefaultMaxTokens,
	},
	AzureGPT41Mini: {
		ID:                 AzureGPT41Mini,
		Name:               "Azure OpenAI – GPT 4.1 mini",
		Provider:           ProviderAzure,
		APIModel:           "gpt-4.1-mini",
		CostPer1MIn:        OpenAIModels[GPT41Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT41Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT41Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT41Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT41Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT41Mini].DefaultMaxTokens,
	},
	AzureGPT41Nano: {
		ID:                 AzureGPT41Nano,
		Name:               "Azure OpenAI – GPT 4.1 nano",
		Provider:           ProviderAzure,
		APIModel:           "gpt-4.1-nano",
		CostPer1MIn:        OpenAIModels[GPT41Nano].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT41Nano].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT41Nano].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT41Nano].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT41Nano].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT41Nano].DefaultMaxTokens,
	},
	AzureGPT45Preview: {
		ID:                 AzureGPT45Preview,
		Name:               "Azure OpenAI – GPT 4.5 preview",
		Provider:           ProviderAzure,
		APIModel:           "gpt-4.5-preview",
		CostPer1MIn:        OpenAIModels[GPT45Preview].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT45Preview].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT45Preview].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT45Preview].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT45Preview].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT45Preview].DefaultMaxTokens,
	},
	AzureGPT4o: {
		ID:                 AzureGPT4o,
		Name:               "Azure OpenAI – GPT-4o",
		Provider:           ProviderAzure,
		APIModel:           "gpt-4o",
		CostPer1MIn:        OpenAIModels[GPT4o].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT4o].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT4o].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT4o].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT4o].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT4o].DefaultMaxTokens,
	},
	AzureGPT4oMini: {
		ID:                 AzureGPT4oMini,
		Name:               "Azure OpenAI – GPT-4o mini",
		Provider:           ProviderAzure,
		APIModel:           "gpt-4o-mini",
		CostPer1MIn:        OpenAIModels[GPT4oMini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT4oMini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT4oMini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT4oMini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT4oMini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT4oMini].DefaultMaxTokens,
	},
	AzureO1: {
		ID:                 AzureO1,
		Name:               "Azure OpenAI – O1",
		Provider:           ProviderAzure,
		APIModel:           "o1",
		CostPer1MIn:        OpenAIModels[O1].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O1].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O1].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O1].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O1].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O1].DefaultMaxTokens,
		CanReason:          OpenAIModels[O1].CanReason,
	},
	AzureO1Mini: {
		ID:                 AzureO1Mini,
		Name:               "Azure OpenAI – O1 mini",
		Provider:           ProviderAzure,
		APIModel:           "o1-mini",
		CostPer1MIn:        OpenAIModels[O1Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O1Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O1Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O1Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O1Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O1Mini].DefaultMaxTokens,
		CanReason:          OpenAIModels[O1Mini].CanReason,
	},
	AzureO3: {
		ID:                 AzureO3,
		Name:               "Azure OpenAI – O3",
		Provider:           ProviderAzure,
		APIModel:           "o3",
		CostPer1MIn:        OpenAIModels[O3].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O3].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O3].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O3].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O3].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O3].DefaultMaxTokens,
		CanReason:          OpenAIModels[O3].CanReason,
	},
	AzureO3Mini: {
		ID:                 AzureO3Mini,
		Name:               "Azure OpenAI – O3 mini",
		Provider:           ProviderAzure,
		APIModel:           "o3-mini",
		CostPer1MIn:        OpenAIModels[O3Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O3Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O3Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O3Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O3Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O3Mini].DefaultMaxTokens,
		CanReason:          OpenAIModels[O3Mini].CanReason,
	},
	AzureO4Mini: {
		ID:                 AzureO4Mini,
		Name:               "Azure OpenAI – O4 mini",
		Provider:           ProviderAzure,
		APIModel:           "o4-mini",
		CostPer1MIn:        OpenAIModels[O4Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O4Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O4Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O4Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O4Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O4Mini].DefaultMaxTokens,
		CanReason:          OpenAIModels[O4Mini].CanReason,
	},
}
View Source
// GeminiModels maps Gemini model IDs to their metadata and pricing
// (USD per 1M tokens). Cached-token costs are 0 here, presumably meaning
// "not priced / not tracked" rather than free — confirm before relying on
// them for billing.
var GeminiModels = map[ModelID]Model{
	Gemini25Flash: {
		ID:                 Gemini25Flash,
		Name:               "Gemini 2.5 Flash",
		Provider:           ProviderGemini,
		APIModel:           "gemini-2.5-flash-preview-04-17", // dated preview alias
		CostPer1MIn:        0.15,
		CostPer1MInCached:  0,
		CostPer1MOutCached: 0,
		CostPer1MOut:       0.60,
		ContextWindow:      1000000,
		DefaultMaxTokens:   50000,
	},
	Gemini25: {
		ID:                 Gemini25,
		Name:               "Gemini 2.5 Pro",
		Provider:           ProviderGemini,
		APIModel:           "gemini-2.5-pro-preview-03-25", // dated preview alias
		CostPer1MIn:        1.25,
		CostPer1MInCached:  0,
		CostPer1MOutCached: 0,
		CostPer1MOut:       10,
		ContextWindow:      1000000,
		DefaultMaxTokens:   50000,
	},
	Gemini20Flash: {
		ID:                 Gemini20Flash,
		Name:               "Gemini 2.0 Flash",
		Provider:           ProviderGemini,
		APIModel:           "gemini-2.0-flash",
		CostPer1MIn:        0.10,
		CostPer1MInCached:  0,
		CostPer1MOutCached: 0,
		CostPer1MOut:       0.40,
		ContextWindow:      1000000,
		DefaultMaxTokens:   6000,
	},
	Gemini20FlashLite: {
		ID:                 Gemini20FlashLite,
		Name:               "Gemini 2.0 Flash Lite",
		Provider:           ProviderGemini,
		APIModel:           "gemini-2.0-flash-lite",
		CostPer1MIn:        0.05,
		CostPer1MInCached:  0,
		CostPer1MOutCached: 0,
		CostPer1MOut:       0.30,
		ContextWindow:      1000000,
		DefaultMaxTokens:   6000,
	},
}
View Source
// GroqModels maps Groq-hosted model IDs to their metadata and pricing
// (USD per 1M tokens).
// NOTE(review): the Llama entries omit DefaultMaxTokens (zero value) while
// QWENQwq sets 50000 — verify callers handle a zero default sensibly.
var GroqModels = map[ModelID]Model{
	QWENQwq: {
		ID:                 QWENQwq,
		Name:               "Qwen Qwq",
		Provider:           ProviderGROQ,
		APIModel:           "qwen-qwq-32b",
		CostPer1MIn:        0.29,
		CostPer1MInCached:  0.275,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       0.39,
		ContextWindow:      128_000,
		DefaultMaxTokens:   50000,
		// for some reason, the groq api doesn't like the reasoningEffort parameter
		CanReason: false,
	},
	Llama4Scout: {
		ID:                 Llama4Scout,
		Name:               "Llama4Scout",
		Provider:           ProviderGROQ,
		APIModel:           "meta-llama/llama-4-scout-17b-16e-instruct",
		CostPer1MIn:        0.11,
		CostPer1MInCached:  0,
		CostPer1MOutCached: 0,
		CostPer1MOut:       0.34,
		ContextWindow:      128_000,
	},
	Llama4Maverick: {
		ID:                 Llama4Maverick,
		Name:               "Llama4Maverick",
		Provider:           ProviderGROQ,
		APIModel:           "meta-llama/llama-4-maverick-17b-128e-instruct",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0,
		CostPer1MOutCached: 0,
		CostPer1MOut:       0.20,
		ContextWindow:      128_000,
	},
	Llama3_3_70BVersatile: {
		ID:                 Llama3_3_70BVersatile,
		Name:               "Llama3_3_70BVersatile",
		Provider:           ProviderGROQ,
		APIModel:           "llama-3.3-70b-versatile",
		CostPer1MIn:        0.59,
		CostPer1MInCached:  0,
		CostPer1MOutCached: 0,
		CostPer1MOut:       0.79,
		ContextWindow:      128_000,
	},
	DeepseekR1DistillLlama70b: {
		ID:                 DeepseekR1DistillLlama70b,
		Name:               "DeepseekR1DistillLlama70b",
		Provider:           ProviderGROQ,
		APIModel:           "deepseek-r1-distill-llama-70b",
		CostPer1MIn:        0.75,
		CostPer1MInCached:  0,
		CostPer1MOutCached: 0,
		CostPer1MOut:       0.99,
		ContextWindow:      128_000,
		CanReason:          true,
	},
}
View Source
// OpenAIModels maps OpenAI model IDs to their metadata and pricing
// (USD per 1M tokens). AzureModels and OpenRouterModels derive their
// pricing from this table, so corrections here propagate to both.
var OpenAIModels = map[ModelID]Model{
	GPT41: {
		ID:                 GPT41,
		Name:               "GPT 4.1",
		Provider:           ProviderOpenAI,
		APIModel:           "gpt-4.1",
		CostPer1MIn:        2.00,
		CostPer1MInCached:  0.50,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       8.00,
		ContextWindow:      1_047_576,
		DefaultMaxTokens:   20000,
	},
	GPT41Mini: {
		ID:       GPT41Mini,
		Name:     "GPT 4.1 mini",
		Provider: ProviderOpenAI,
		// Fix: was "gpt-4.1", which silently routed mini requests to the
		// full-size model at 5x the cost.
		APIModel:           "gpt-4.1-mini",
		CostPer1MIn:        0.40,
		CostPer1MInCached:  0.10,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       1.60,
		// NOTE(review): 4.1 and 4.1-nano use 1_047_576 — confirm whether
		// mini's 200_000 here is intentional.
		ContextWindow:    200_000,
		DefaultMaxTokens: 20000,
	},
	GPT41Nano: {
		ID:                 GPT41Nano,
		Name:               "GPT 4.1 nano",
		Provider:           ProviderOpenAI,
		APIModel:           "gpt-4.1-nano",
		CostPer1MIn:        0.10,
		CostPer1MInCached:  0.025,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       0.40,
		ContextWindow:      1_047_576,
		DefaultMaxTokens:   20000,
	},
	GPT45Preview: {
		ID:                 GPT45Preview,
		Name:               "GPT 4.5 preview",
		Provider:           ProviderOpenAI,
		APIModel:           "gpt-4.5-preview",
		CostPer1MIn:        75.00,
		CostPer1MInCached:  37.50,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       150.00,
		ContextWindow:      128_000,
		DefaultMaxTokens:   15000,
	},
	GPT4o: {
		ID:                 GPT4o,
		Name:               "GPT 4o",
		Provider:           ProviderOpenAI,
		APIModel:           "gpt-4o",
		CostPer1MIn:        2.50,
		CostPer1MInCached:  1.25,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       10.00,
		ContextWindow:      128_000,
		DefaultMaxTokens:   4096,
	},
	GPT4oMini: {
		ID:                 GPT4oMini,
		Name:               "GPT 4o mini",
		Provider:           ProviderOpenAI,
		APIModel:           "gpt-4o-mini",
		CostPer1MIn:        0.15,
		CostPer1MInCached:  0.075,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       0.60,
		ContextWindow:      128_000,
		// NOTE(review): DefaultMaxTokens deliberately left at zero in the
		// original; kept as-is — verify callers fall back sensibly.
	},
	O1: {
		ID:                 O1,
		Name:               "O1",
		Provider:           ProviderOpenAI,
		APIModel:           "o1",
		CostPer1MIn:        15.00,
		CostPer1MInCached:  7.50,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       60.00,
		ContextWindow:      200_000,
		DefaultMaxTokens:   50000,
		CanReason:          true,
	},
	O1Pro: {
		ID:                 O1Pro,
		Name:               "o1 pro",
		Provider:           ProviderOpenAI,
		APIModel:           "o1-pro",
		CostPer1MIn:        150.00,
		CostPer1MInCached:  0.0,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       600.00,
		ContextWindow:      200_000,
		DefaultMaxTokens:   50000,
		CanReason:          true,
	},
	O1Mini: {
		ID:                 O1Mini,
		Name:               "o1 mini",
		Provider:           ProviderOpenAI,
		APIModel:           "o1-mini",
		CostPer1MIn:        1.10,
		CostPer1MInCached:  0.55,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       4.40,
		ContextWindow:      128_000,
		DefaultMaxTokens:   50000,
		CanReason:          true,
	},
	O3: {
		ID:                 O3,
		Name:               "o3",
		Provider:           ProviderOpenAI,
		APIModel:           "o3",
		CostPer1MIn:        10.00,
		CostPer1MInCached:  2.50,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       40.00,
		ContextWindow:      200_000,
		// NOTE(review): DefaultMaxTokens left at zero in the original;
		// kept as-is — verify callers fall back sensibly.
		CanReason: true,
	},
	O3Mini: {
		ID:                 O3Mini,
		Name:               "o3 mini",
		Provider:           ProviderOpenAI,
		APIModel:           "o3-mini",
		CostPer1MIn:        1.10,
		CostPer1MInCached:  0.55,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       4.40,
		ContextWindow:      200_000,
		DefaultMaxTokens:   50000,
		CanReason:          true,
	},
	O4Mini: {
		ID:                 O4Mini,
		Name:               "o4 mini",
		Provider:           ProviderOpenAI,
		APIModel:           "o4-mini",
		CostPer1MIn:        1.10,
		CostPer1MInCached:  0.275,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       4.40,
		ContextWindow:      128_000,
		DefaultMaxTokens:   50000,
		CanReason:          true,
	},
}
View Source
// OpenRouterModels maps OpenRouter model IDs to their metadata. Pricing
// and limits mirror the upstream provider tables (OpenAIModels,
// GeminiModels, AnthropicModels); APIModel is the upstream model name in
// OpenRouter's "vendor/model" namespace.
// NOTE(review): o3-mini and o4-mini route to the "-high" reasoning-effort
// variants, and Gemini 2.5 Flash routes to the ":thinking" variant —
// presumably intentional upgrades; confirm.
var OpenRouterModels = map[ModelID]Model{
	OpenRouterGPT41: {
		ID:                 OpenRouterGPT41,
		Name:               "OpenRouter – GPT 4.1",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4.1",
		CostPer1MIn:        OpenAIModels[GPT41].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT41].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT41].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT41].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT41].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT41].DefaultMaxTokens,
	},
	OpenRouterGPT41Mini: {
		ID:                 OpenRouterGPT41Mini,
		Name:               "OpenRouter – GPT 4.1 mini",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4.1-mini",
		CostPer1MIn:        OpenAIModels[GPT41Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT41Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT41Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT41Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT41Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT41Mini].DefaultMaxTokens,
	},
	OpenRouterGPT41Nano: {
		ID:                 OpenRouterGPT41Nano,
		Name:               "OpenRouter – GPT 4.1 nano",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4.1-nano",
		CostPer1MIn:        OpenAIModels[GPT41Nano].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT41Nano].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT41Nano].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT41Nano].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT41Nano].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT41Nano].DefaultMaxTokens,
	},
	OpenRouterGPT45Preview: {
		ID:                 OpenRouterGPT45Preview,
		Name:               "OpenRouter – GPT 4.5 preview",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4.5-preview",
		CostPer1MIn:        OpenAIModels[GPT45Preview].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT45Preview].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT45Preview].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT45Preview].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT45Preview].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT45Preview].DefaultMaxTokens,
	},
	OpenRouterGPT4o: {
		ID:                 OpenRouterGPT4o,
		Name:               "OpenRouter – GPT 4o",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4o",
		CostPer1MIn:        OpenAIModels[GPT4o].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT4o].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT4o].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT4o].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT4o].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT4o].DefaultMaxTokens,
	},
	OpenRouterGPT4oMini: {
		ID:                 OpenRouterGPT4oMini,
		Name:               "OpenRouter – GPT 4o mini",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4o-mini",
		CostPer1MIn:        OpenAIModels[GPT4oMini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT4oMini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT4oMini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT4oMini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT4oMini].ContextWindow,
		// no DefaultMaxTokens, mirroring the OpenAIModels[GPT4oMini] entry
	},
	OpenRouterO1: {
		ID:                 OpenRouterO1,
		Name:               "OpenRouter – O1",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o1",
		CostPer1MIn:        OpenAIModels[O1].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O1].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O1].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O1].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O1].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O1].DefaultMaxTokens,
		CanReason:          OpenAIModels[O1].CanReason,
	},
	OpenRouterO1Pro: {
		ID:                 OpenRouterO1Pro,
		Name:               "OpenRouter – o1 pro",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o1-pro",
		CostPer1MIn:        OpenAIModels[O1Pro].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O1Pro].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O1Pro].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O1Pro].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O1Pro].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O1Pro].DefaultMaxTokens,
		CanReason:          OpenAIModels[O1Pro].CanReason,
	},
	OpenRouterO1Mini: {
		ID:                 OpenRouterO1Mini,
		Name:               "OpenRouter – o1 mini",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o1-mini",
		CostPer1MIn:        OpenAIModels[O1Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O1Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O1Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O1Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O1Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O1Mini].DefaultMaxTokens,
		CanReason:          OpenAIModels[O1Mini].CanReason,
	},
	OpenRouterO3: {
		ID:                 OpenRouterO3,
		Name:               "OpenRouter – o3",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o3",
		CostPer1MIn:        OpenAIModels[O3].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O3].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O3].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O3].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O3].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O3].DefaultMaxTokens,
		CanReason:          OpenAIModels[O3].CanReason,
	},
	OpenRouterO3Mini: {
		ID:       OpenRouterO3Mini,
		Name:     "OpenRouter – o3 mini",
		Provider: ProviderOpenRouter,
		// "-high" reasoning-effort variant (see table comment)
		APIModel:           "openai/o3-mini-high",
		CostPer1MIn:        OpenAIModels[O3Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O3Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O3Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O3Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O3Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O3Mini].DefaultMaxTokens,
		CanReason:          OpenAIModels[O3Mini].CanReason,
	},
	OpenRouterO4Mini: {
		ID:       OpenRouterO4Mini,
		Name:     "OpenRouter – o4 mini",
		Provider: ProviderOpenRouter,
		// "-high" reasoning-effort variant (see table comment)
		APIModel:           "openai/o4-mini-high",
		CostPer1MIn:        OpenAIModels[O4Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O4Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O4Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O4Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O4Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O4Mini].DefaultMaxTokens,
		CanReason:          OpenAIModels[O4Mini].CanReason,
	},
	OpenRouterGemini25Flash: {
		ID:                 OpenRouterGemini25Flash,
		Name:               "OpenRouter – Gemini 2.5 Flash",
		Provider:           ProviderOpenRouter,
		APIModel:           "google/gemini-2.5-flash-preview:thinking",
		CostPer1MIn:        GeminiModels[Gemini25Flash].CostPer1MIn,
		CostPer1MInCached:  GeminiModels[Gemini25Flash].CostPer1MInCached,
		CostPer1MOut:       GeminiModels[Gemini25Flash].CostPer1MOut,
		CostPer1MOutCached: GeminiModels[Gemini25Flash].CostPer1MOutCached,
		ContextWindow:      GeminiModels[Gemini25Flash].ContextWindow,
		DefaultMaxTokens:   GeminiModels[Gemini25Flash].DefaultMaxTokens,
	},
	OpenRouterGemini25: {
		ID:                 OpenRouterGemini25,
		Name:               "OpenRouter – Gemini 2.5 Pro",
		Provider:           ProviderOpenRouter,
		APIModel:           "google/gemini-2.5-pro-preview-03-25",
		CostPer1MIn:        GeminiModels[Gemini25].CostPer1MIn,
		CostPer1MInCached:  GeminiModels[Gemini25].CostPer1MInCached,
		CostPer1MOut:       GeminiModels[Gemini25].CostPer1MOut,
		CostPer1MOutCached: GeminiModels[Gemini25].CostPer1MOutCached,
		ContextWindow:      GeminiModels[Gemini25].ContextWindow,
		DefaultMaxTokens:   GeminiModels[Gemini25].DefaultMaxTokens,
	},
	OpenRouterClaude35Sonnet: {
		ID:                 OpenRouterClaude35Sonnet,
		Name:               "OpenRouter – Claude 3.5 Sonnet",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3.5-sonnet",
		CostPer1MIn:        AnthropicModels[Claude35Sonnet].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude35Sonnet].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude35Sonnet].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude35Sonnet].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude35Sonnet].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude35Sonnet].DefaultMaxTokens,
	},
	OpenRouterClaude3Haiku: {
		ID:                 OpenRouterClaude3Haiku,
		Name:               "OpenRouter – Claude 3 Haiku",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3-haiku",
		CostPer1MIn:        AnthropicModels[Claude3Haiku].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude3Haiku].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude3Haiku].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude3Haiku].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude3Haiku].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude3Haiku].DefaultMaxTokens,
	},
	OpenRouterClaude37Sonnet: {
		ID:                 OpenRouterClaude37Sonnet,
		Name:               "OpenRouter – Claude 3.7 Sonnet",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3.7-sonnet",
		CostPer1MIn:        AnthropicModels[Claude37Sonnet].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude37Sonnet].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude37Sonnet].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude37Sonnet].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude37Sonnet].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude37Sonnet].DefaultMaxTokens,
		CanReason:          AnthropicModels[Claude37Sonnet].CanReason,
	},
	OpenRouterClaude35Haiku: {
		ID:                 OpenRouterClaude35Haiku,
		Name:               "OpenRouter – Claude 3.5 Haiku",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3.5-haiku",
		CostPer1MIn:        AnthropicModels[Claude35Haiku].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude35Haiku].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude35Haiku].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude35Haiku].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude35Haiku].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude35Haiku].DefaultMaxTokens,
	},
	OpenRouterClaude3Opus: {
		ID:                 OpenRouterClaude3Opus,
		Name:               "OpenRouter – Claude 3 Opus",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3-opus",
		CostPer1MIn:        AnthropicModels[Claude3Opus].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude3Opus].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude3Opus].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude3Opus].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude3Opus].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude3Opus].DefaultMaxTokens,
	},
}
View Source
// ProviderPopularity ranks providers in order of popularity
// (1 = most popular); used for ordering provider lists in the UI.
var ProviderPopularity = map[ModelProvider]int{
	ProviderAnthropic:  1,
	ProviderOpenAI:     2,
	ProviderGemini:     3,
	ProviderGROQ:       4,
	ProviderOpenRouter: 5,
	ProviderBedrock:    6,
	ProviderAzure:      7,
}
Providers in order of popularity
View Source
// SupportedModels holds models not covered by the per-provider tables;
// currently only the Bedrock-hosted Claude 3.7 Sonnet.
// NOTE(review): unlike AnthropicModels[Claude37Sonnet], this entry omits
// ContextWindow, DefaultMaxTokens, and CanReason (all zero-valued) —
// confirm whether that is intentional.
var SupportedModels = map[ModelID]Model{
	BedrockClaude37Sonnet: {
		ID:                 BedrockClaude37Sonnet,
		Name:               "Bedrock: Claude 3.7 Sonnet",
		Provider:           ProviderBedrock,
		APIModel:           "anthropic.claude-3-7-sonnet-20250219-v1:0",
		CostPer1MIn:        3.0,
		CostPer1MInCached:  3.75,
		CostPer1MOutCached: 0.30,
		CostPer1MOut:       15.0,
	},
}
Functions ¶
This section is empty.
Types ¶
type Model ¶
// Model describes a single LLM offering: identity, provider routing,
// pricing (USD per 1M tokens), and generation limits.
type Model struct {
	ID       ModelID       `json:"id"`        // internal identifier, used as the key in the model tables
	Name     string        `json:"name"`      // human-readable display name
	Provider ModelProvider `json:"provider"`  // which backend serves this model
	APIModel string        `json:"api_model"` // model name sent to the provider's API

	// Costs in USD per one million tokens.
	CostPer1MIn        float64 `json:"cost_per_1m_in"`
	CostPer1MOut       float64 `json:"cost_per_1m_out"`
	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`
	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"`

	ContextWindow    int64 `json:"context_window"`     // maximum context size in tokens
	DefaultMaxTokens int64 `json:"default_max_tokens"` // default generation cap; zero in some entries
	CanReason        bool  `json:"can_reason"`         // supports extended thinking / reasoning-effort
}
type ModelID ¶
// ModelID uniquely identifies a model within this package.
type ModelID string
// Azure OpenAI deployment model IDs.
// NOTE(review): no AzureO1Pro, matching AzureModels — confirm o1-pro is
// intentionally unavailable on Azure.
const (
	AzureGPT41        ModelID = "azure.gpt-4.1"
	AzureGPT41Mini    ModelID = "azure.gpt-4.1-mini"
	AzureGPT41Nano    ModelID = "azure.gpt-4.1-nano"
	AzureGPT45Preview ModelID = "azure.gpt-4.5-preview"
	AzureGPT4o        ModelID = "azure.gpt-4o"
	AzureGPT4oMini    ModelID = "azure.gpt-4o-mini"
	AzureO1           ModelID = "azure.o1"
	AzureO1Mini       ModelID = "azure.o1-mini"
	AzureO3           ModelID = "azure.o3"
	AzureO3Mini       ModelID = "azure.o3-mini"
	AzureO4Mini       ModelID = "azure.o4-mini"
)
// Model IDs
const (
	// Bedrock
	BedrockClaude37Sonnet ModelID = "bedrock.claude-3.7-sonnet"
)
Model IDs
type ModelProvider ¶
// ModelProvider identifies the backend service that hosts a model.
type ModelProvider string
// Additional provider identifiers.
const (
	ProviderBedrock ModelProvider = "bedrock"

	// ForTests
	ProviderMock ModelProvider = "__mock"
)
// ProviderAzure identifies the Azure OpenAI backend.
const ProviderAzure ModelProvider = "azure"
Click to show internal directories.
Click to hide internal directories.