genai

package module
v0.5.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 6, 2025 License: Apache-2.0 Imports: 29 Imported by: 16

README

GitHub go.mod Go version Go Reference

✨ NEW ✨

Google Gemini Multimodal Live support

Introducing support for the Gemini Multimodal Live feature. Here's an example Multimodal Live server showing realtime conversation and video streaming: code

Google Gen AI Go SDK

The Google Gen AI Go SDK enables developers to use Google's state-of-the-art generative AI models (like Gemini) to build AI-powered features and applications. This SDK supports use cases like:

  • Generate text from text-only input
  • Generate text from text-and-images input (multimodal)
  • ...

For example, with just a few lines of code, you can access Gemini's multimodal capabilities to generate text from text-and-image input.

parts := []*genai.Part{
  {Text: "What's this image about?"},
  {InlineData: &genai.Blob{Data: imageBytes, MIMEType: "image/jpeg"}},
}
result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", []*genai.Content{{Parts: parts}}, nil)

Installation and usage

Add the SDK to your module with go get google.golang.org/genai.

Create Clients

Imports

import "google.golang.org/genai"

Gemini API Client:

client, err := genai.NewClient(ctx, &genai.ClientConfig{
	APIKey:   apiKey,
	Backend:  genai.BackendGeminiAPI,
})

Vertex AI Client:

client, err := genai.NewClient(ctx, &genai.ClientConfig{
	Project:  project,
	Location: location,
	Backend:  genai.BackendVertexAI,
})

License

The contents of this repository are licensed under the Apache License, version 2.0.

Documentation

Index

Examples

Constants

This section is empty.

Variables

View Source
var ErrPageDone = errors.New("PageDone")

ErrPageDone is the error returned by Next when no more pages are available.

Functions

func Ptr

func Ptr[T any](t T) *T

Ptr returns a pointer to its argument. It can be used to initialize pointer fields:

genai.GenerateContentConfig{Temperature: genai.Ptr(0.5)}

Types

type Backend

type Backend int

Backend is the GenAI backend to use for the client.

const (
	// BackendUnspecified causes the backend to be determined automatically. If the
	// GOOGLE_GENAI_USE_VERTEXAI environment variable is set to "1" or "true", then
	// the backend is BackendVertexAI. Otherwise, if GOOGLE_GENAI_USE_VERTEXAI
	// is unset or set to any other value, then BackendGeminiAPI is used.  Explicitly
	// setting the backend in ClientConfig overrides the environment variable.
	BackendUnspecified Backend = iota
	// BackendGeminiAPI is the Gemini API backend.
	BackendGeminiAPI
	// BackendVertexAI is the Vertex AI backend.
	BackendVertexAI
)

func (Backend) String

func (t Backend) String() string

The Stringer interface for Backend.

type Blob

type Blob struct {
	// Required. Raw bytes.
	Data []byte `json:"data,omitempty"`
	// Required. The IANA standard MIME type of the source data.
	MIMEType string `json:"mimeType,omitempty"`
}

Content blob.

type BlockedReason

type BlockedReason string

Blocked reason.

const (
	// Unspecified blocked reason.
	BlockedReasonUnspecified BlockedReason = "BLOCKED_REASON_UNSPECIFIED"
	// Candidates blocked due to safety.
	BlockedReasonSafety BlockedReason = "SAFETY"
	// Candidates blocked due to other reason.
	BlockedReasonOther BlockedReason = "OTHER"
	// Candidates blocked due to the terms which are included from the terminology blocklist.
	BlockedReasonBlocklist BlockedReason = "BLOCKLIST"
	// Candidates blocked due to prohibited content.
	BlockedReasonProhibitedContent BlockedReason = "PROHIBITED_CONTENT"
)

type CachedContent added in v0.1.0

type CachedContent struct {
	// The server-generated resource name of the cached content.
	Name string `json:"name,omitempty"`
	// The user-generated meaningful display name of the cached content.
	DisplayName string `json:"displayName,omitempty"`
	// The name of the publisher model to use for cached content.
	Model string `json:"model,omitempty"`
	// Creation time of the cache entry.
	CreateTime *time.Time `json:"createTime,omitempty"`
	// When the cache entry was last updated in UTC time.
	UpdateTime *time.Time `json:"updateTime,omitempty"`
	// Expiration time of the cached content.
	ExpireTime *time.Time `json:"expireTime,omitempty"`
	// Metadata on the usage of the cached content.
	UsageMetadata *CachedContentUsageMetadata `json:"usageMetadata,omitempty"`
}

A resource used in LLM queries for users to explicitly specify what to cache.

type CachedContentUsageMetadata added in v0.1.0

type CachedContentUsageMetadata struct {
	// Duration of audio in seconds. If nil, then no AudioDurationSeconds is returned by
	// the API.
	AudioDurationSeconds *int32 `json:"audioDurationSeconds,omitempty"`
	// Number of images. If nil, then no ImageCount is returned by the API.
	ImageCount *int32 `json:"imageCount,omitempty"`
	// Number of text characters. If nil, then no TextCount is returned by the API.
	TextCount *int32 `json:"textCount,omitempty"`
	// Total number of tokens that the cached content consumes.
	TotalTokenCount int32 `json:"totalTokenCount,omitempty"`
	// Duration of video in seconds. If nil, then no VideoDurationSeconds is returned by
	// the API.
	VideoDurationSeconds *int32 `json:"videoDurationSeconds,omitempty"`
}

Metadata on the usage of the cached content.

type Caches added in v0.1.0

type Caches struct {
	// contains filtered or unexported fields
}

func (Caches) All added in v0.4.0

func (Caches) Create added in v0.1.0

func (m Caches) Create(ctx context.Context, model string, config *CreateCachedContentConfig) (*CachedContent, error)

func (Caches) Delete added in v0.1.0

func (Caches) Get added in v0.1.0

func (m Caches) Get(ctx context.Context, name string, config *GetCachedContentConfig) (*CachedContent, error)

func (Caches) List added in v0.4.0

func (Caches) Update added in v0.1.0

func (m Caches) Update(ctx context.Context, name string, config *UpdateCachedContentConfig) (*CachedContent, error)

type Candidate

type Candidate struct {
	// Contains the multi-part content of the response.
	Content *Content `json:"content,omitempty"`
	// Source attribution of the generated content.
	CitationMetadata *CitationMetadata `json:"citationMetadata,omitempty"`
	// Describes the reason the model stopped generating tokens.
	FinishMessage string `json:"finishMessage,omitempty"`
	// Number of tokens for this candidate. If nil, then no TokenCount is returned by the
	// API.
	TokenCount *int32 `json:"tokenCount,omitempty"`
	// Output only. Average log probability score of the candidate. If nil, then no AvgLogprobs
	// is returned by the API.
	AvgLogprobs *float64 `json:"avgLogprobs,omitempty"`
	// Output only. The reason why the model stopped generating tokens. If empty, the model
	// has not stopped generating the tokens.
	FinishReason FinishReason `json:"finishReason,omitempty"`
	// Output only. Metadata specifies sources used to ground generated content.
	GroundingMetadata *GroundingMetadata `json:"groundingMetadata,omitempty"`
	// Output only. Index of the candidate. If nil, then no Index is returned by the API.
	Index *int32 `json:"index,omitempty"`
	// Output only. Log-likelihood scores for the response tokens and top tokens
	LogprobsResult *LogprobsResult `json:"logprobsResult,omitempty"`
	// Output only. List of ratings for the safety of a response candidate. There is at
	// most one rating per category.
	SafetyRatings []*SafetyRating `json:"safetyRatings,omitempty"`
}

A response candidate generated from the model.

type Citation

type Citation struct {
	// Output only. End index into the content.
	EndIndex int32 `json:"endIndex,omitempty"`
	// Output only. License of the attribution.
	License string `json:"license,omitempty"`
	// Output only. Publication date of the attribution.
	PublicationDate *civil.Date `json:"publicationDate,omitempty"`
	// Output only. Start index into the content.
	StartIndex int32 `json:"startIndex,omitempty"`
	// Output only. Title of the attribution.
	Title string `json:"title,omitempty"`
	// Output only. URL reference of the attribution.
	URI string `json:"uri,omitempty"`
}

Source attributions for content.

func (*Citation) UnmarshalJSON added in v0.4.0

func (c *Citation) UnmarshalJSON(data []byte) error

UnmarshalJSON custom unmarshalling to handle PublicationDate as a map containing year, month, and day.

type CitationMetadata

type CitationMetadata struct {
	// Contains citation information when the model directly quotes, at
	// length, from another source. Can include traditional websites and code
	// repositories.
	Citations []*Citation `json:"citations,omitempty"`
}

Citation information when the model quotes another source.

type Client

type Client struct {
	Models *Models
	Live   *Live
	Caches *Caches
	// contains filtered or unexported fields
}

Client is the GenAI client.

func NewClient

func NewClient(ctx context.Context, cc *ClientConfig) (*Client, error)

NewClient creates a new GenAI client.

You can configure the client by passing in a ClientConfig struct.

Example (Geminiapi)

This example shows how to create a new client for Gemini API.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	fmt.Println(client.ClientConfig().APIKey)
}
Output:

Example (Vertexai)

This example shows how to create a new client for Vertex AI.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	fmt.Println(client.ClientConfig().Backend)
}
Output:

func (Client) ClientConfig

func (c Client) ClientConfig() ClientConfig

ClientConfig returns the ClientConfig for the client.

The returned ClientConfig is a copy of the ClientConfig used to create the client.

type ClientConfig

type ClientConfig struct {
	APIKey      string              // API Key for GenAI. Required for BackendGeminiAPI.
	Backend     Backend             // Backend for GenAI. See Backend constants. Defaults to BackendGeminiAPI unless explicitly set to BackendVertexAI, or the environment variable GOOGLE_GENAI_USE_VERTEXAI is set to "1" or "true".
	Project     string              // GCP Project ID for Vertex AI. Required for BackendVertexAI.
	Location    string              // GCP Location/Region for Vertex AI. Required for BackendVertexAI. See https://cloud.google.com/vertex-ai/docs/general/locations
	Credentials *google.Credentials // Optional. Google credentials.  If not specified, application default credentials will be used.
	HTTPClient  *http.Client        // Optional HTTP client to use. If nil, a default client will be created. For Vertex AI, this client must handle authentication appropriately.
	HTTPOptions HTTPOptions         // Optional HTTP options to override.
}

ClientConfig is the configuration for the GenAI client.

type ClientError

type ClientError struct {
	// contains filtered or unexported fields
}

ClientError is an error that occurs when the GenAI API receives an invalid request from a client.

func (ClientError) Error

func (e ClientError) Error() string

Error returns a string representation of the ClientError.

type CodeExecutionResult

type CodeExecutionResult struct {
	// Required. Outcome of the code execution.
	Outcome Outcome `json:"outcome,omitempty"`
	// Optional. Contains stdout when code execution is successful, stderr or other description
	// otherwise.
	Output string `json:"output,omitempty"`
}

Result of executing the ExecutableCode. Always follows a `part` containing the ExecutableCode.

type ComputeTokensConfig added in v0.1.0

type ComputeTokensConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}

Optional parameters for computing tokens.

type ComputeTokensResponse added in v0.1.0

type ComputeTokensResponse struct {
	// Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances
	// with a prompt in each instance. We also need to return lists of tokens info for the
	// request with multiple instances.
	TokensInfo []*TokensInfo `json:"tokensInfo,omitempty"`
}

Response for computing tokens.

type Content

type Content struct {
	// List of parts that constitute a single message. Each part may have
	// a different IANA MIME type.
	Parts []*Part `json:"parts,omitempty"`
	// Optional. The producer of the content. Must be either 'user' or
	// 'model'. Useful to set for multi-turn conversations, otherwise can be
	// left blank or unset. If role is not specified, SDK will determine the role.
	Role string `json:"role,omitempty"`
}

Contains the multi-part content of a message.

func NewModelContentFromBytes added in v0.4.0

func NewModelContentFromBytes(data []byte, mimeType string) *Content

NewModelContentFromBytes builds a Content with a "model" role from a single byte array.

func NewModelContentFromCodeExecutionResult added in v0.4.0

func NewModelContentFromCodeExecutionResult(outcome Outcome, output string) *Content

NewModelContentFromCodeExecutionResult builds a Content with a "model" role from a single code execution result.

func NewModelContentFromExecutableCode added in v0.4.0

func NewModelContentFromExecutableCode(code string, language Language) *Content

NewModelContentFromExecutableCode builds a Content with a "model" role from a single executable code.

func NewModelContentFromFunctionCall added in v0.4.0

func NewModelContentFromFunctionCall(name string, args map[string]any) *Content

NewModelContentFromFunctionCall builds a Content with a "model" role from a single function call.

func NewModelContentFromParts added in v0.4.0

func NewModelContentFromParts(parts []*Part) *Content

NewModelContentFromParts builds a Content with a "model" role from a list of parts.

func NewModelContentFromText added in v0.4.0

func NewModelContentFromText(text string) *Content

NewModelContentFromText builds a Content with a "model" role from a single text string.

func NewModelContentFromURI added in v0.4.0

func NewModelContentFromURI(fileURI, mimeType string) *Content

NewModelContentFromURI builds a Content with a "model" role from a single file URI.

func NewUserContentFromBytes added in v0.4.0

func NewUserContentFromBytes(data []byte, mimeType string) *Content

NewUserContentFromBytes builds a Content with a "user" role from a single byte array.

func NewUserContentFromCodeExecutionResult added in v0.4.0

func NewUserContentFromCodeExecutionResult(outcome Outcome, output string) *Content

NewUserContentFromCodeExecutionResult builds a Content with a "user" role from a single code execution result.

func NewUserContentFromExecutableCode added in v0.4.0

func NewUserContentFromExecutableCode(code string, language Language) *Content

NewUserContentFromExecutableCode builds a Content with a "user" role from a single executable code.

func NewUserContentFromFunctionResponse added in v0.4.0

func NewUserContentFromFunctionResponse(name string, response map[string]any) *Content

NewUserContentFromFunctionResponse builds a Content with a "user" role from a single function response.

func NewUserContentFromParts added in v0.4.0

func NewUserContentFromParts(parts []*Part) *Content

NewUserContentFromParts builds a Content with a "user" role from a list of parts.

func NewUserContentFromText added in v0.4.0

func NewUserContentFromText(text string) *Content

NewUserContentFromText builds a Content with a "user" role from a single text string.

func NewUserContentFromURI added in v0.4.0

func NewUserContentFromURI(fileURI, mimeType string) *Content

NewUserContentFromURI builds a Content with a "user" role from a single file URI.

func Text

func Text(text string) []*Content

Text returns a slice of Content with a single Part with the given text.

type ContentEmbedding added in v0.5.0

type ContentEmbedding struct {
	// A list of floats representing an embedding.
	Values []float32 `json:"values,omitempty"`
	// Vertex API only. Statistics of the input text associated with this
	// embedding.
	Statistics *ContentEmbeddingStatistics `json:"statistics,omitempty"`
}

The embedding generated from an input content.

type ContentEmbeddingStatistics added in v0.5.0

type ContentEmbeddingStatistics struct {
	// Vertex API only. If the input text was truncated due to having
	// a length longer than the allowed maximum input.
	Truncated bool `json:"truncated,omitempty"`
	// Vertex API only. Number of tokens of the input text.
	TokenCount *float32 `json:"tokenCount,omitempty"`
}

Statistics of the input text associated with the result of content embedding.

type ControlReferenceConfig

type ControlReferenceConfig struct {
	// The type of control reference image to use.
	ControlType ControlReferenceType `json:"controlType,omitempty"`
	// Defaults to false. When set to true, the control image will be
	// computed by the model based on the control type. When set to false,
	// the control image must be provided by the user.
	EnableControlImageComputation bool `json:"enableControlImageComputation,omitempty"`
}

Configuration for a Control reference image.

type ControlReferenceImage

type ControlReferenceImage struct {
	// The reference image for the editing operation.
	ReferenceImage *Image `json:"referenceImage,omitempty"`
	// The ID of the reference image.
	ReferenceID int32 `json:"referenceId,omitempty"`

	// Configuration for the control reference image.
	Config *ControlReferenceConfig `json:"config,omitempty"`
	// contains filtered or unexported fields
}

A control image is an image that represents a sketch image of areas for the model to fill in based on the prompt. The image of the control reference image is either a control image provided by the user, or a regular image from which the backend will generate a control image. In the case of the latter, the EnableControlImageComputation field in the config should be set to true.

func NewControlReferenceImage added in v0.5.0

func NewControlReferenceImage(referenceImage *Image, referenceID int32, config *ControlReferenceConfig) *ControlReferenceImage

NewControlReferenceImage creates a new ControlReferenceImage.

type ControlReferenceType

type ControlReferenceType string

Enum representing the control type of a control reference image.

const (
	ControlReferenceTypeDefault  ControlReferenceType = "CONTROL_TYPE_DEFAULT"
	ControlReferenceTypeCanny    ControlReferenceType = "CONTROL_TYPE_CANNY"
	ControlReferenceTypeScribble ControlReferenceType = "CONTROL_TYPE_SCRIBBLE"
	ControlReferenceTypeFaceMesh ControlReferenceType = "CONTROL_TYPE_FACE_MESH"
)

type CountTokensConfig added in v0.1.0

type CountTokensConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// Instructions for the model to steer it toward better performance.
	SystemInstruction *Content `json:"systemInstruction,omitempty"`
	// Code that enables the system to interact with external systems to
	// perform an action outside of the knowledge and scope of the model.
	Tools []*Tool `json:"tools,omitempty"`
	// Configuration that the model uses to generate the response. Not
	// supported by the Gemini Developer API.
	GenerationConfig *GenerationConfig `json:"generationConfig,omitempty"`
}

Config for the count_tokens method.

type CountTokensResponse added in v0.1.0

type CountTokensResponse struct {
	// Total number of tokens.
	TotalTokens int32 `json:"totalTokens,omitempty"`
	// Number of tokens in the cached part of the prompt (the cached content). If nil, then
	// no CachedContentTokenCount is returned by the API.
	CachedContentTokenCount *int32 `json:"cachedContentTokenCount,omitempty"`
}

Response for counting tokens.

type CreateCachedContentConfig added in v0.1.0

type CreateCachedContentConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// The TTL for this resource. The expiration time is computed: now + TTL.
	TTL string `json:"ttl,omitempty"`
	// Timestamp of when this resource is considered expired.
	ExpireTime *time.Time `json:"expireTime,omitempty"`
	// The user-generated meaningful display name of the cached content.
	DisplayName string `json:"displayName,omitempty"`
	// The content to cache.
	Contents []*Content `json:"contents,omitempty"`
	// Developer set system instruction.
	SystemInstruction *Content `json:"systemInstruction,omitempty"`
	// A list of `Tools` the model may use to generate the next response.
	Tools []*Tool `json:"tools,omitempty"`
	// Configuration for the tools to use. This config is shared for all tools.
	ToolConfig *ToolConfig `json:"toolConfig,omitempty"`
}

Optional configuration for cached content creation.

type DeleteCachedContentConfig added in v0.1.0

type DeleteCachedContentConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}

Optional parameters for caches.delete method.

type DeleteCachedContentResponse added in v0.1.0

type DeleteCachedContentResponse struct {
}

Empty response for caches.delete method.

type DeleteModelConfig added in v0.4.0

type DeleteModelConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}

type DeleteModelResponse added in v0.4.0

type DeleteModelResponse struct {
}

type DeploymentResourcesType added in v0.4.0

type DeploymentResourcesType string
const (
	// Should not be used.
	DeploymentResourcesTypeUnspecified DeploymentResourcesType = "DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED"
	// Resources that are dedicated to the DeployedModel, and that need a higher degree
	// of manual configuration.
	DeploymentResourcesTypeDedicatedResources DeploymentResourcesType = "DEDICATED_RESOURCES"
	// Resources that to a large degree are decided by Vertex AI, and require only a
	// modest amount of additional configuration.
	DeploymentResourcesTypeAutomaticResources DeploymentResourcesType = "AUTOMATIC_RESOURCES"
	// Resources that can be shared by multiple DeployedModels. A pre-configured DeploymentResourcePool
	// is required.
	DeploymentResourcesTypeSharedResources DeploymentResourcesType = "SHARED_RESOURCES"
)

type DownloadFileConfig added in v0.5.0

type DownloadFileConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}

Used to override the default configuration.

type DynamicRetrievalConfig

type DynamicRetrievalConfig struct {
	// The mode of the predictor to be used in dynamic retrieval.
	Mode DynamicRetrievalConfigMode `json:"mode,omitempty"`
	// Optional. The threshold to be used in dynamic retrieval. If not set, a system default
	// value is used.
	DynamicThreshold *float32 `json:"dynamicThreshold,omitempty"`
}

Describes the options to customize dynamic retrieval.

type DynamicRetrievalConfigMode

type DynamicRetrievalConfigMode string

Config for the dynamic retrieval config mode.

const (
	// Always trigger retrieval.
	DynamicRetrievalConfigModeUnspecified DynamicRetrievalConfigMode = "MODE_UNSPECIFIED"
	// Run retrieval only when system decides it is necessary.
	DynamicRetrievalConfigModeDynamic DynamicRetrievalConfigMode = "MODE_DYNAMIC"
)

type EditImageConfig added in v0.5.0

type EditImageConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// Cloud Storage URI used to store the generated images.
	OutputGCSURI string `json:"outputGcsUri,omitempty"`
	// Description of what to discourage in the generated images.
	NegativePrompt string `json:"negativePrompt,omitempty"`
	// Number of images to generate.
	NumberOfImages int32 `json:"numberOfImages,omitempty"`
	// Aspect ratio of the generated images.
	AspectRatio string `json:"aspectRatio,omitempty"`
	// Controls how much the model adheres to the text prompt. Large values increase output
	// and prompt alignment, but may compromise image quality. If empty, then API will determine
	// the default value.
	GuidanceScale *float32 `json:"guidanceScale,omitempty"`
	// Seed for the random number generator. If empty, then API will determine the default
	// value.
	Seed *int32 `json:"seed,omitempty"`
	// Filter level for safety filtering.
	SafetyFilterLevel SafetyFilterLevel `json:"safetyFilterLevel,omitempty"`
	// Allows generation of people by the model.
	PersonGeneration PersonGeneration `json:"personGeneration,omitempty"`
	// Whether to report the safety scores of each image in the response.
	IncludeSafetyAttributes bool `json:"includeSafetyAttributes,omitempty"`
	// Whether to include the Responsible AI filter reason if the image
	// is filtered out of the response.
	IncludeRAIReason bool `json:"includeRaiReason,omitempty"`
	// Language of the text in the prompt.
	Language ImagePromptLanguage `json:"language,omitempty"`
	// MIME type of the generated image.
	OutputMIMEType string `json:"outputMimeType,omitempty"`
	// Compression quality of the generated image (for `image/jpeg` MIME type only). If
	// empty, then API will determine the default value.
	OutputCompressionQuality *int32 `json:"outputCompressionQuality,omitempty"`
	// Describes the editing mode for the request.
	EditMode EditMode `json:"editMode,omitempty"`
}

Configuration for editing an image.

type EditImageResponse added in v0.5.0

type EditImageResponse struct {
	// Generated images.
	GeneratedImages []*GeneratedImage `json:"generatedImages,omitempty"`
}

Response for the request to edit an image.

type EditMode added in v0.5.0

type EditMode string

Enum representing the Imagen 3 Edit mode.

const (
	EditModeDefault           EditMode = "EDIT_MODE_DEFAULT"
	EditModeInpaintRemoval    EditMode = "EDIT_MODE_INPAINT_REMOVAL"
	EditModeInpaintInsertion  EditMode = "EDIT_MODE_INPAINT_INSERTION"
	EditModeOutpaint          EditMode = "EDIT_MODE_OUTPAINT"
	EditModeControlledEditing EditMode = "EDIT_MODE_CONTROLLED_EDITING"
	EditModeStyle             EditMode = "EDIT_MODE_STYLE"
	EditModeBgswap            EditMode = "EDIT_MODE_BGSWAP"
	EditModeProductImage      EditMode = "EDIT_MODE_PRODUCT_IMAGE"
)

type EmbedContentConfig added in v0.5.0

type EmbedContentConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// Type of task for which the embedding will be used.
	TaskType string `json:"taskType,omitempty"`
	// Title for the text. Only applicable when TaskType is
	// `RETRIEVAL_DOCUMENT`.
	Title string `json:"title,omitempty"`
	// Reduced dimension for the output embedding. If set,
	// excessive values in the output embedding are truncated from the end.
	// Supported by newer models since 2024 only. You cannot set this value if
	// using the earlier model (`models/embedding-001`).
	OutputDimensionality *int32 `json:"outputDimensionality,omitempty"`
	// Vertex API only. The MIME type of the input.
	MIMEType string `json:"mimeType,omitempty"`
	// Vertex API only. Whether to silently truncate inputs longer than
	// the max sequence length. If this option is set to false, oversized inputs
	// will lead to an INVALID_ARGUMENT error, similar to other text APIs.
	AutoTruncate bool `json:"autoTruncate,omitempty"`
}

Optional parameters for the embed_content method.

type EmbedContentMetadata added in v0.5.0

type EmbedContentMetadata struct {
	// Vertex API only. The total number of billable characters included
	// in the request.
	BillableCharacterCount *int32 `json:"billableCharacterCount,omitempty"`
}

Request-level metadata for the Vertex Embed Content API.

type EmbedContentResponse added in v0.5.0

type EmbedContentResponse struct {
	// The embeddings for each request, in the same order as provided in
	// the batch request.
	Embeddings []*ContentEmbedding `json:"embeddings,omitempty"`
	// Vertex API only. Metadata about the request.
	Metadata *EmbedContentMetadata `json:"metadata,omitempty"`
}

Response for the embed_content method.

type Endpoint added in v0.4.0

type Endpoint struct {
	// Resource name of the endpoint.
	Name string `json:"name,omitempty"`
	// ID of the model that's deployed to the endpoint.
	DeployedModelID string `json:"deployedModelId,omitempty"`
}

An endpoint where you deploy models.

type ExecutableCode

type ExecutableCode struct {
	// Required. The code to be executed.
	Code string `json:"code,omitempty"`
	// Required. Programming language of the `code`.
	Language Language `json:"language,omitempty"`
}

Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the FunctionDeclaration tool and FunctionCallingConfig mode is set to [Mode.CODE].

type FileData

type FileData struct {
	// Required. URI.
	FileURI string `json:"fileUri,omitempty"`
	// Required. The IANA standard MIME type of the source data.
	MIMEType string `json:"mimeType,omitempty"`
}

URI based data.

type FinishReason

type FinishReason string

The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens.

const (
	// The finish reason is unspecified.
	FinishReasonUnspecified FinishReason = "FINISH_REASON_UNSPECIFIED"
	// Token generation reached a natural stopping point or a configured stop sequence.
	FinishReasonStop FinishReason = "STOP"
	// Token generation reached the configured maximum output tokens.
	FinishReasonMaxTokens FinishReason = "MAX_TOKENS"
	// Token generation stopped because the content potentially contains safety violations.
	// NOTE: When streaming, content is empty if content filters block the output.
	FinishReasonSafety FinishReason = "SAFETY"
	// The token generation stopped because of potential recitation.
	FinishReasonRecitation FinishReason = "RECITATION"
	// All other reasons that stopped the token generation.
	FinishReasonOther FinishReason = "OTHER"
	// Token generation stopped because the content contains forbidden terms.
	FinishReasonBlocklist FinishReason = "BLOCKLIST"
	// Token generation stopped for potentially containing prohibited content.
	FinishReasonProhibitedContent FinishReason = "PROHIBITED_CONTENT"
	// Token generation stopped because the content potentially contains Sensitive Personally
	// Identifiable Information (SPII).
	FinishReasonSPII FinishReason = "SPII"
	// The function call generated by the model is invalid.
	FinishReasonMalformedFunctionCall FinishReason = "MALFORMED_FUNCTION_CALL"
)

type FunctionCall

type FunctionCall struct {
	// The unique ID of the function call. If populated, the client must execute the
	// `function_call` and return the response with the matching `id`.
	ID string `json:"id,omitempty"`
	// Optional. The function parameters and values in JSON object format. See
	// [FunctionDeclaration.parameters] for parameter details.
	Args map[string]any `json:"args,omitempty"`
	// Required. The name of the function to call. Matches [FunctionDeclaration.name].
	Name string `json:"name,omitempty"`
}

A function call.

type FunctionCallingConfig

type FunctionCallingConfig struct {
	// Optional. Function calling mode.
	Mode FunctionCallingConfigMode `json:"mode,omitempty"`
	// Optional. Function names to call. Only set when the Mode is ANY. Function names should
	// match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function
	// call from the set of function names provided.
	AllowedFunctionNames []string `json:"allowedFunctionNames,omitempty"`
}

Function calling config.

type FunctionCallingConfigMode

type FunctionCallingConfigMode string

Config for the function calling config mode.

const (
	// The function calling config mode is unspecified. Should not be used.
	FunctionCallingConfigModeUnspecified FunctionCallingConfigMode = "MODE_UNSPECIFIED"
	// Default model behavior: the model decides whether to predict function calls or a
	// natural language response.
	FunctionCallingConfigModeAuto FunctionCallingConfigMode = "AUTO"
	// Model is constrained to always predicting function calls only. If "allowed_function_names"
	// are set, the predicted function calls will be limited to any one of "allowed_function_names",
	// else the predicted function calls will be any one of the provided "function_declarations".
	FunctionCallingConfigModeAny FunctionCallingConfigMode = "ANY"
	// Model will not predict any function calls. Model behavior is the same as when not
	// passing any function declarations.
	FunctionCallingConfigModeNone FunctionCallingConfigMode = "NONE"
)

type FunctionDeclaration

// FunctionDeclaration defines a function that the model can generate JSON inputs for.
// The inputs are based on the OpenAPI 3.0.3 specification
// (https://spec.openapis.org/oas/v3.0.3).
type FunctionDeclaration struct {
	// Describes the output from the function in the OpenAPI JSON Schema
	// Object format.
	Response *Schema `json:"response,omitempty"`
	// Optional. Description and purpose of the function. Model uses it to decide how and
	// whether to call the function.
	Description string `json:"description,omitempty"`
	// Required. The name of the function to call. Must start with a letter or an underscore.
	// Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length
	// of 64.
	Name string `json:"name,omitempty"`
	// Optional. Describes the parameters to this function in JSON Schema Object format.
	// Reflects the OpenAPI 3.0.3 Parameter Object. string Key: the name of the parameter.
	// Parameter names are case sensitive. Schema Value: the Schema defining the type used
	// for the parameter. For a function with no parameters, this can be left unset. Parameter
	// names must start with a letter or an underscore and must only contain chars a-z,
	// A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and
	// 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type:
	// INTEGER required: - param1
	Parameters *Schema `json:"parameters,omitempty"`
}

Defines a function that the model can generate JSON inputs for. The inputs are based on `OpenAPI 3.0 specifications <https://spec.openapis.org/oas/v3.0.3>`_.

type FunctionResponse

// FunctionResponse is a function response, i.e. the result of executing a [FunctionCall].
type FunctionResponse struct {
	// The ID of the function call this response is for. Populated by the client
	// to match the corresponding function call `id`.
	ID string `json:"id,omitempty"`
	// Required. The name of the function to call. Matches [FunctionDeclaration.name] and
	// [FunctionCall.name].
	Name string `json:"name,omitempty"`
	// Required. The function response in JSON object format. Use "output" key to specify
	// function output and "error" key to specify error details (if any). If "output" and
	// "error" keys are not specified, then whole "response" is treated as function output.
	Response map[string]any `json:"response,omitempty"`
}

A function response.

type GenerateContentConfig

// GenerateContentConfig is the optional configuration for GenerateContent. API default
// values and more details:
// https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig
type GenerateContentConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// Instructions for the model to steer it toward better performance.
	// For example, "Answer as concisely as possible" or "Don't use technical
	// terms in your response".
	SystemInstruction *Content `json:"systemInstruction,omitempty"`
	// Value that controls the degree of randomness in token selection.
	// Lower temperatures are good for prompts that require a less open-ended or
	// creative response, while higher temperatures can lead to more diverse or
	// creative results.
	Temperature *float32 `json:"temperature,omitempty"`
	// Tokens are selected from the most to least probable until the sum
	// of their probabilities equals this value. Use a lower value for less
	// random responses and a higher value for more random responses.
	TopP *float32 `json:"topP,omitempty"`
	// For each token selection step, the `top_k` tokens with the
	// highest probabilities are sampled. Then tokens are further filtered based
	// on `top_p` with the final token selected using temperature sampling. Use
	// a lower number for less random responses and a higher number for more
	// random responses.
	TopK *float32 `json:"topK,omitempty"`
	// Number of response variations to return.
	CandidateCount *int32 `json:"candidateCount,omitempty"`
	// Maximum number of tokens that can be generated in the response.
	MaxOutputTokens *int32 `json:"maxOutputTokens,omitempty"`
	// List of strings that tells the model to stop generating text if one
	// of the strings is encountered in the response.
	StopSequences []string `json:"stopSequences,omitempty"`
	// Whether to return the log probabilities of the tokens that were
	// chosen by the model at each step.
	ResponseLogprobs bool `json:"responseLogprobs,omitempty"`
	// Number of top candidate tokens to return the log probabilities for
	// at each generation step.
	Logprobs *int32 `json:"logprobs,omitempty"`
	// Positive values penalize tokens that already appear in the
	// generated text, increasing the probability of generating more diverse
	// content.
	PresencePenalty *float32 `json:"presencePenalty,omitempty"`
	// Positive values penalize tokens that repeatedly appear in the
	// generated text, increasing the probability of generating more diverse
	// content.
	FrequencyPenalty *float32 `json:"frequencyPenalty,omitempty"`
	// When `seed` is fixed to a specific number, the model makes a best
	// effort to provide the same response for repeated requests. By default, a
	// random number is used.
	Seed *int32 `json:"seed,omitempty"`
	// Output response media type of the generated candidate text.
	ResponseMIMEType string `json:"responseMimeType,omitempty"`
	// Schema that the generated candidate text must adhere to.
	ResponseSchema *Schema `json:"responseSchema,omitempty"`
	// Configuration for model router requests.
	RoutingConfig *GenerationConfigRoutingConfig `json:"routingConfig,omitempty"`
	// Safety settings in the request to block unsafe content in the
	// response.
	SafetySettings []*SafetySetting `json:"safetySettings,omitempty"`
	// Code that enables the system to interact with external systems to
	// perform an action outside of the knowledge and scope of the model.
	Tools []*Tool `json:"tools,omitempty"`
	// Associates model output to a specific function call.
	ToolConfig *ToolConfig `json:"toolConfig,omitempty"`
	// Labels with user-defined metadata to break down billed charges.
	Labels map[string]string `json:"labels,omitempty"`
	// Resource name of a context cache that can be used in subsequent
	// requests.
	CachedContent string `json:"cachedContent,omitempty"`
	// The requested modalities of the response. Represents the set of
	// modalities that the model can return.
	ResponseModalities []string `json:"responseModalities,omitempty"`
	// If specified, the media resolution specified will be used.
	MediaResolution MediaResolution `json:"mediaResolution,omitempty"`
	// The speech generation configuration.
	SpeechConfig *SpeechConfig `json:"speechConfig,omitempty"`
	// If enabled, audio timestamp will be included in the request to the
	// model.
	AudioTimestamp bool `json:"audioTimestamp,omitempty"`
	// The thinking features configuration.
	ThinkingConfig *ThinkingConfig `json:"thinkingConfig,omitempty"`
}

Optional configuration for the GenerateContent. You can find API default values and more details at https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig and https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters.

type GenerateContentResponse

// GenerateContentResponse is the response message for PredictionService.GenerateContent.
type GenerateContentResponse struct {
	// Response variations returned by the model.
	Candidates []*Candidate `json:"candidates,omitempty"`
	// Timestamp when the request is made to the server.
	CreateTime *time.Time `json:"createTime,omitempty"`
	// Identifier for each response.
	ResponseID string `json:"responseId,omitempty"`
	// Output only. The model version used to generate the response.
	ModelVersion string `json:"modelVersion,omitempty"`
	// Output only. Content filter results for a prompt sent in the request. Note: Sent
	// only in the first stream chunk. Only happens when no candidates were generated due
	// to content violations.
	PromptFeedback *GenerateContentResponsePromptFeedback `json:"promptFeedback,omitempty"`
	// Usage metadata about the response(s).
	UsageMetadata *GenerateContentResponseUsageMetadata `json:"usageMetadata,omitempty"`
}

Response message for PredictionService.GenerateContent.

func (*GenerateContentResponse) CodeExecutionResult added in v0.5.0

func (r *GenerateContentResponse) CodeExecutionResult() string

CodeExecutionResult returns the code execution result in the GenerateContentResponse.

func (*GenerateContentResponse) ExecutableCode added in v0.5.0

func (r *GenerateContentResponse) ExecutableCode() string

ExecutableCode returns the executable code in the GenerateContentResponse.

func (*GenerateContentResponse) FunctionCalls added in v0.1.0

func (r *GenerateContentResponse) FunctionCalls() []*FunctionCall

FunctionCalls returns the list of function calls in the GenerateContentResponse.

func (*GenerateContentResponse) Text added in v0.1.0

func (r *GenerateContentResponse) Text() (string, error)

Text concatenates all the text parts in the GenerateContentResponse.

type GenerateContentResponsePromptFeedback

// GenerateContentResponsePromptFeedback holds content filter results for a prompt sent
// in the request.
type GenerateContentResponsePromptFeedback struct {
	// Output only. Blocked reason.
	BlockReason BlockedReason `json:"blockReason,omitempty"`
	// Output only. A readable block reason message.
	BlockReasonMessage string `json:"blockReasonMessage,omitempty"`
	// Output only. Safety ratings.
	SafetyRatings []*SafetyRating `json:"safetyRatings,omitempty"`
}

Content filter results for a prompt sent in the request.

type GenerateContentResponseUsageMetadata

// GenerateContentResponseUsageMetadata is usage (token accounting) metadata about the
// response(s).
type GenerateContentResponseUsageMetadata struct {
	// Output only. Number of tokens in the cached part in the input (the cached content).
	// If nil, then no CachedContentTokenCount is returned by the API.
	CachedContentTokenCount *int32 `json:"cachedContentTokenCount,omitempty"`
	// Number of tokens in the response (all the generated response candidates). If nil,
	// then no CandidatesTokenCount is returned by the API.
	CandidatesTokenCount *int32 `json:"candidatesTokenCount,omitempty"`
	// Number of tokens in the prompt. When cached_content is set, this is still the total
	// effective prompt size, meaning this includes the number of tokens in the cached content.
	// If nil, then no PromptTokenCount is returned by the API.
	PromptTokenCount *int32 `json:"promptTokenCount,omitempty"`
	// Total token count for prompt and response candidates.
	TotalTokenCount int32 `json:"totalTokenCount,omitempty"`
}

Usage metadata about response(s).

type GenerateImagesConfig added in v0.1.0

// GenerateImagesConfig is the configuration for generating images. API default values
// and more details at
// https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api (Vertex AI)
// and https://ai.google.dev/gemini-api/docs/imagen#imagen-model (Gemini API).
type GenerateImagesConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// Cloud Storage URI used to store the generated images.
	OutputGCSURI string `json:"outputGcsUri,omitempty"`
	// Description of what to discourage in the generated images.
	NegativePrompt string `json:"negativePrompt,omitempty"`
	// Number of images to generate.
	NumberOfImages *int32 `json:"numberOfImages,omitempty"`
	// Aspect ratio of the generated images.
	AspectRatio string `json:"aspectRatio,omitempty"`
	// Controls how much the model adheres to the text prompt. Large
	// values increase output and prompt alignment, but may compromise image
	// quality.
	GuidanceScale *float32 `json:"guidanceScale,omitempty"`
	// Random seed for image generation. This is not available when
	// `add_watermark` is set to true.
	Seed *int32 `json:"seed,omitempty"`
	// Filter level for safety filtering.
	SafetyFilterLevel SafetyFilterLevel `json:"safetyFilterLevel,omitempty"`
	// Allows generation of people by the model.
	PersonGeneration PersonGeneration `json:"personGeneration,omitempty"`
	// Whether to report the safety scores of each image in the response.
	IncludeSafetyAttributes bool `json:"includeSafetyAttributes,omitempty"`
	// Whether to include the Responsible AI filter reason if the image
	// is filtered out of the response.
	IncludeRAIReason bool `json:"includeRaiReason,omitempty"`
	// Language of the text in the prompt.
	Language ImagePromptLanguage `json:"language,omitempty"`
	// MIME type of the generated image.
	OutputMIMEType string `json:"outputMimeType,omitempty"`
	// Compression quality of the generated image (for `image/jpeg`
	// only).
	OutputCompressionQuality *int32 `json:"outputCompressionQuality,omitempty"`
	// Whether to add a watermark to the generated images.
	AddWatermark bool `json:"addWatermark,omitempty"`
	// Whether to use the prompt rewriting logic.
	EnhancePrompt bool `json:"enhancePrompt,omitempty"`
}

The configuration for generating images. You can find API default values and more details at VertexAI: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api. GeminiAPI: https://ai.google.dev/gemini-api/docs/imagen#imagen-model

type GenerateImagesResponse added in v0.1.0

// GenerateImagesResponse is the output images response.
type GenerateImagesResponse struct {
	// List of generated images.
	GeneratedImages []*GeneratedImage `json:"generatedImages,omitempty"`
}

The output images response.

type GeneratedImage added in v0.1.0

// GeneratedImage is one output image.
type GeneratedImage struct {
	// The output image data.
	Image *Image `json:"image,omitempty"`
	// Responsible AI filter reason if the image is filtered out of the
	// response.
	RAIFilteredReason string `json:"raiFilteredReason,omitempty"`
	// The rewritten prompt used for the image generation if the prompt
	// enhancer is enabled.
	EnhancedPrompt string `json:"enhancedPrompt,omitempty"`
}

An output image.

type GenerationConfig

// GenerationConfig is the generation config. API default values and more details at
// https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig
type GenerationConfig struct {
	// Optional. If enabled, audio timestamp will be included in the request to the model.
	AudioTimestamp bool `json:"audioTimestamp,omitempty"`
	// Optional. Number of candidates to generate.
	CandidateCount *int32 `json:"candidateCount,omitempty"`
	// Optional. Frequency penalties.
	FrequencyPenalty *float32 `json:"frequencyPenalty,omitempty"`
	// Optional. Number of top candidate tokens to return the log probabilities for at
	// each generation step.
	Logprobs *int32 `json:"logprobs,omitempty"`
	// Optional. The maximum number of output tokens to generate per message.
	MaxOutputTokens *int32 `json:"maxOutputTokens,omitempty"`
	// Optional. Presence penalties.
	PresencePenalty *float32 `json:"presencePenalty,omitempty"`
	// Optional. If true, export the logprobs results in response.
	ResponseLogprobs bool `json:"responseLogprobs,omitempty"`
	// Optional. Output response mimetype of the generated candidate text. Supported mimetype:
	// - `text/plain`: (default) Text output. - `application/json`: JSON response in the
	// candidates. The model needs to be prompted to output the appropriate response type,
	// otherwise the behavior is undefined. This is a preview feature.
	ResponseMIMEType string `json:"responseMimeType,omitempty"`
	// Optional. The `Schema` object allows the definition of input and output data types.
	// These types can be objects, but also primitives and arrays. Represents a select subset
	// of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If
	// set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`:
	// Schema for JSON response.
	ResponseSchema *Schema `json:"responseSchema,omitempty"`
	// Optional. Routing configuration.
	RoutingConfig *GenerationConfigRoutingConfig `json:"routingConfig,omitempty"`
	// Optional. Seed for reproducible generation.
	Seed *int32 `json:"seed,omitempty"`
	// Optional. Stop sequences.
	StopSequences []string `json:"stopSequences,omitempty"`
	// Optional. Controls the randomness of predictions.
	Temperature *float32 `json:"temperature,omitempty"`
	// Optional. If specified, top-k sampling will be used.
	TopK *float32 `json:"topK,omitempty"`
	// Optional. If specified, nucleus sampling will be used.
	TopP *float32 `json:"topP,omitempty"`
}

Generation config. You can find API default values and more details at https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig and https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters.

type GenerationConfigRoutingConfig

// GenerationConfigRoutingConfig is the configuration for routing the request to a
// specific model. Exactly one of AutoMode or ManualMode is expected to be set.
type GenerationConfigRoutingConfig struct {
	// Automated routing.
	AutoMode *GenerationConfigRoutingConfigAutoRoutingMode `json:"autoMode,omitempty"`
	// Manual routing.
	ManualMode *GenerationConfigRoutingConfigManualRoutingMode `json:"manualMode,omitempty"`
}

The configuration for routing the request to a specific model.

type GenerationConfigRoutingConfigAutoRoutingMode

// GenerationConfigRoutingConfigAutoRoutingMode: when automated routing is specified,
// the routing is determined by the pretrained routing model and the customer-provided
// model routing preference.
type GenerationConfigRoutingConfigAutoRoutingMode struct {
	// The model routing preference.
	ModelRoutingPreference string `json:"modelRoutingPreference,omitempty"`
}

When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference.

type GenerationConfigRoutingConfigManualRoutingMode

// GenerationConfigRoutingConfigManualRoutingMode: when manual routing is set, the
// specified model will be used directly.
type GenerationConfigRoutingConfigManualRoutingMode struct {
	// The model name to use. Only the public LLM models are accepted. e.g. 'gemini-1.5-pro-001'.
	ModelName string `json:"modelName,omitempty"`
}

When manual routing is set, the specified model will be used directly.

type GetCachedContentConfig added in v0.1.0

// GetCachedContentConfig holds optional parameters for the caches.get method.
type GetCachedContentConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}

Optional parameters for caches.get method.

type GetModelConfig added in v0.4.0

// GetModelConfig holds optional parameters for the models.get method.
type GetModelConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}

Optional parameters for models.get method.

type GoogleSearch

// GoogleSearch is a tool to support Google Search in the model. Powered by Google.
// It has no configuration fields; its presence in a Tool enables the feature.
type GoogleSearch struct {
}

Tool to support Google Search in Model. Powered by Google.

type GoogleSearchRetrieval

// GoogleSearchRetrieval is a tool to retrieve public web data for grounding, powered
// by Google.
type GoogleSearchRetrieval struct {
	// Specifies the dynamic retrieval configuration for the given source.
	DynamicRetrievalConfig *DynamicRetrievalConfig `json:"dynamicRetrievalConfig,omitempty"`
}

Tool to retrieve public web data for grounding, powered by Google.

type GroundingChunk

// GroundingChunk is a single grounding chunk; it comes either from a retrieval tool
// or from the web.
type GroundingChunk struct {
	// Grounding chunk from context retrieved by the retrieval tools.
	RetrievedContext *GroundingChunkRetrievedContext `json:"retrievedContext,omitempty"`
	// Grounding chunk from the web.
	Web *GroundingChunkWeb `json:"web,omitempty"`
}

Grounding chunk.

type GroundingChunkRetrievedContext

// GroundingChunkRetrievedContext is a chunk from context retrieved by the retrieval tools.
type GroundingChunkRetrievedContext struct {
	// Text of the attribution.
	Text string `json:"text,omitempty"`
	// Title of the attribution.
	Title string `json:"title,omitempty"`
	// URI reference of the attribution.
	URI string `json:"uri,omitempty"`
}

Chunk from context retrieved by the retrieval tools.

type GroundingChunkWeb

// GroundingChunkWeb is a chunk from the web.
type GroundingChunkWeb struct {
	// Title of the chunk.
	Title string `json:"title,omitempty"`
	// URI reference of the chunk.
	URI string `json:"uri,omitempty"`
}

Chunk from the web.

type GroundingMetadata

// GroundingMetadata is metadata returned to the client when grounding is enabled.
type GroundingMetadata struct {
	// List of supporting references retrieved from the specified grounding source.
	GroundingChunks []*GroundingChunk `json:"groundingChunks,omitempty"`
	// Optional. List of grounding support.
	GroundingSupports []*GroundingSupport `json:"groundingSupports,omitempty"`
	// Optional. Output only. Retrieval metadata.
	RetrievalMetadata *RetrievalMetadata `json:"retrievalMetadata,omitempty"`
	// Optional. Queries executed by the retrieval tools.
	RetrievalQueries []string `json:"retrievalQueries,omitempty"`
	// Optional. Google search entry for the follow-up web searches.
	SearchEntryPoint *SearchEntryPoint `json:"searchEntryPoint,omitempty"`
	// Optional. Web search queries for the follow-up web search.
	WebSearchQueries []string `json:"webSearchQueries,omitempty"`
}

Metadata returned to client when grounding is enabled.

type GroundingSupport

// GroundingSupport ties a segment of generated content to its grounding citations.
type GroundingSupport struct {
	// Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident.
	// This list must have the same size as the grounding_chunk_indices.
	ConfidenceScores []float32 `json:"confidenceScores,omitempty"`
	// A list of indices (into 'grounding_chunk') specifying the citations associated with
	// the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3],
	// grounding_chunk[4] are the retrieved content attributed to the claim.
	GroundingChunkIndices []int32 `json:"groundingChunkIndices,omitempty"`
	// Segment of the content this support belongs to.
	Segment *Segment `json:"segment,omitempty"`
}

Grounding support.

type HTTPOptions added in v0.1.0

// HTTPOptions are HTTP options to be used in each of the requests.
type HTTPOptions struct {
	// BaseURL specifies the base URL for the API endpoint. If unset, defaults to
	// "https://generativelanguage.googleapis.com/" for the Gemini API backend, and to the
	// location-specific Vertex AI endpoint
	// (e.g., "https://us-central1-aiplatform.googleapis.com/") for the Vertex AI backend.
	BaseURL string `json:"baseUrl,omitempty"`
	// APIVersion specifies the version of the API to use.
	APIVersion string `json:"apiVersion,omitempty"`
	// Additional HTTP headers to be sent with the request.
	Headers http.Header `json:"headers,omitempty"`
}

HTTP options to be used in each of the requests.

type HarmBlockMethod

type HarmBlockMethod string

Specify if the threshold is used for probability or severity score. If not specified, the threshold is used for probability score.

const (
	// The harm block method is unspecified.
	HarmBlockMethodUnspecified HarmBlockMethod = "HARM_BLOCK_METHOD_UNSPECIFIED"
	// The harm block method uses both probability and severity scores.
	HarmBlockMethodSeverity HarmBlockMethod = "SEVERITY"
	// The harm block method uses only the probability score.
	HarmBlockMethodProbability HarmBlockMethod = "PROBABILITY"
)

type HarmBlockThreshold

type HarmBlockThreshold string

The harm block threshold.

const (
	// Unspecified harm block threshold.
	HarmBlockThresholdUnspecified HarmBlockThreshold = "HARM_BLOCK_THRESHOLD_UNSPECIFIED"
	// Block low threshold and above (i.e. block more).
	HarmBlockThresholdBlockLowAndAbove HarmBlockThreshold = "BLOCK_LOW_AND_ABOVE"
	// Block medium threshold and above.
	HarmBlockThresholdBlockMediumAndAbove HarmBlockThreshold = "BLOCK_MEDIUM_AND_ABOVE"
	// Block only high threshold (i.e. block less).
	HarmBlockThresholdBlockOnlyHigh HarmBlockThreshold = "BLOCK_ONLY_HIGH"
	// Block none.
	HarmBlockThresholdBlockNone HarmBlockThreshold = "BLOCK_NONE"
	// Turn off the safety filter entirely.
	HarmBlockThresholdOff HarmBlockThreshold = "OFF"
)

type HarmCategory

type HarmCategory string

Harm category.

const (
	// The harm category is unspecified.
	HarmCategoryUnspecified HarmCategory = "HARM_CATEGORY_UNSPECIFIED"
	// The harm category is hate speech.
	HarmCategoryHateSpeech HarmCategory = "HARM_CATEGORY_HATE_SPEECH"
	// The harm category is dangerous content.
	HarmCategoryDangerousContent HarmCategory = "HARM_CATEGORY_DANGEROUS_CONTENT"
	// The harm category is harassment.
	HarmCategoryHarassment HarmCategory = "HARM_CATEGORY_HARASSMENT"
	// The harm category is sexually explicit content.
	HarmCategorySexuallyExplicit HarmCategory = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
	// The harm category is civic integrity.
	HarmCategoryCivicIntegrity HarmCategory = "HARM_CATEGORY_CIVIC_INTEGRITY"
)

type HarmProbability

type HarmProbability string

Harm probability levels in the content.

const (
	// Harm probability unspecified.
	HarmProbabilityUnspecified HarmProbability = "HARM_PROBABILITY_UNSPECIFIED"
	// Negligible probability of harm.
	HarmProbabilityNegligible HarmProbability = "NEGLIGIBLE"
	// Low probability of harm.
	HarmProbabilityLow HarmProbability = "LOW"
	// Medium probability of harm.
	HarmProbabilityMedium HarmProbability = "MEDIUM"
	// High probability of harm.
	HarmProbabilityHigh HarmProbability = "HIGH"
)

type HarmSeverity

type HarmSeverity string

Harm severity levels in the content.

const (
	// Harm severity unspecified.
	HarmSeverityUnspecified HarmSeverity = "HARM_SEVERITY_UNSPECIFIED"
	// Negligible level of harm severity.
	HarmSeverityNegligible HarmSeverity = "HARM_SEVERITY_NEGLIGIBLE"
	// Low level of harm severity.
	HarmSeverityLow HarmSeverity = "HARM_SEVERITY_LOW"
	// Medium level of harm severity.
	HarmSeverityMedium HarmSeverity = "HARM_SEVERITY_MEDIUM"
	// High level of harm severity.
	HarmSeverityHigh HarmSeverity = "HARM_SEVERITY_HIGH"
)

type Image

// Image is an image, stored either in Cloud Storage or inline as bytes.
type Image struct {
	// The Cloud Storage URI of the image. `Image` can contain a value
	// for this field or the `image_bytes` field but not both.
	GCSURI string `json:"gcsUri,omitempty"`
	// The image bytes data. `Image` can contain a value for this field
	// or the `gcs_uri` field but not both.
	ImageBytes []byte `json:"imageBytes,omitempty"`
	// The MIME type of the image.
	MIMEType string `json:"mimeType,omitempty"`
}

An image.

type ImagePromptLanguage added in v0.1.0

type ImagePromptLanguage string

Enum that specifies the language of the text in the prompt.

const (
	// Auto-detect the prompt language.
	ImagePromptLanguageAuto ImagePromptLanguage = "auto"
	// English.
	ImagePromptLanguageEn ImagePromptLanguage = "en"
	// Japanese.
	ImagePromptLanguageJa ImagePromptLanguage = "ja"
	// Korean.
	ImagePromptLanguageKo ImagePromptLanguage = "ko"
	// Hindi.
	ImagePromptLanguageHi ImagePromptLanguage = "hi"
)

type Language

type Language string

Programming language of the `code`.

const (
	// Unspecified language. This value should not be used.
	LanguageUnspecified Language = "LANGUAGE_UNSPECIFIED"
	// Python >= 3.10, with numpy and simpy available.
	LanguagePython Language = "PYTHON"
)

type ListCachedContentsConfig added in v0.4.0

// ListCachedContentsConfig is the config for the caches.list method.
type ListCachedContentsConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// PageSize specifies the maximum number of cached contents to return per API call.
	// This setting does not affect the total number of cached contents returned by the
	// All() function during iteration; it only controls how many items are retrieved in
	// each individual request to the server. If zero, the server will use a default value.
	// Setting a positive value can be useful for managing the size and frequency of API
	// calls.
	PageSize int32 `json:"pageSize,omitempty"`
	// PageToken represents a token used for pagination in API responses. It's an opaque
	// string that should be passed to subsequent requests to retrieve the next page of
	// results. An empty PageToken typically indicates that there are no further pages available.
	PageToken string `json:"pageToken,omitempty"`
}

Config for caches.list method.

type ListCachedContentsResponse added in v0.4.0

// ListCachedContentsResponse is the response of the caches.list method.
type ListCachedContentsResponse struct {
	// Token to pass to a subsequent request to retrieve the next page; empty when there
	// are no further pages.
	NextPageToken string `json:"nextPageToken,omitempty"`
	// List of cached contents.
	CachedContents []*CachedContent `json:"cachedContents,omitempty"`
}

type ListModelsConfig added in v0.5.0

// ListModelsConfig is the config for the models.list method.
type ListModelsConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// Maximum number of models to return per API call. If nil, the server default is used.
	PageSize *int32 `json:"pageSize,omitempty"`
	// Opaque pagination token from a previous response; pass it to retrieve the next page.
	PageToken string `json:"pageToken,omitempty"`
	// Server-side filter expression for the listed models.
	// NOTE(review): supported filter syntax is backend-specific — verify against the API docs.
	Filter string `json:"filter,omitempty"`
	// QueryBase is a boolean flag to control whether to query base models or tuned models.
	// If nil, then the SDK will use the default value Ptr(true).
	QueryBase *bool `json:"queryBase,omitempty"`
}

type ListModelsResponse added in v0.5.0

// ListModelsResponse is the response of the models.list method.
type ListModelsResponse struct {
	// Token to pass to a subsequent request to retrieve the next page; empty when there
	// are no further pages.
	NextPageToken string `json:"nextPageToken,omitempty"`
	// The models returned in this page.
	Models []*Model `json:"models,omitempty"`
}

type Live

// Live can be used to create a realtime connection to the API. It is initialized when
// creating a client; you don't need to create a new Live object yourself. The live
// module is experimental.
type Live struct {
	// contains filtered or unexported fields
}

Live can be used to create a realtime connection to the API. It is initiated when creating a client. You don't need to create a new Live object. The live module is experimental.

client, _ := genai.NewClient(ctx, &genai.ClientConfig{})
session, _ := client.Live.Connect(model, &genai.LiveConnectConfig{})

func (*Live) Connect

func (r *Live) Connect(model string, config *LiveConnectConfig) (*Session, error)

Connect establishes a realtime connection to the specified model with given configuration. It returns a Session object representing the connection or an error if the connection fails. The live module is experimental.

type LiveClientContent

// LiveClientContent is an incremental update of the current conversation delivered from
// the client. All the content here is unconditionally appended to the conversation
// history and used as part of the prompt to the model. A message here interrupts any
// in-progress model generation.
type LiveClientContent struct {
	// The content appended to the current conversation with the model.
	// For single-turn queries, this is a single instance. For multi-turn
	// queries, this is a repeated field that contains conversation history and
	// the latest request.
	Turns []*Content `json:"turns,omitempty"`
	// If true, indicates that the server content generation should start with
	// the currently accumulated prompt. Otherwise, the server will await
	// additional messages before starting generation.
	TurnComplete bool `json:"turnComplete,omitempty"`
}

Incremental update of the current conversation delivered from the client. All the content here will unconditionally be appended to the conversation history and used as part of the prompt to the model to generate content. A message here will interrupt any current model generation.

type LiveClientMessage

// LiveClientMessage holds the messages sent by the client in the API call. Exactly one
// of the fields is expected to be set per message.
type LiveClientMessage struct {
	// Message to be sent by the system when connecting to the API. SDK users should not
	// send this message.
	Setup *LiveClientSetup `json:"setup,omitempty"`
	// Incremental update of the current conversation delivered from the client.
	ClientContent *LiveClientContent `json:"clientContent,omitempty"`
	// User input that is sent in real time.
	RealtimeInput *LiveClientRealtimeInput `json:"realtimeInput,omitempty"`
	// Response to a `ToolCallMessage` received from the server.
	ToolResponse *LiveClientToolResponse `json:"toolResponse,omitempty"`
}

Messages sent by the client in the API call.

type LiveClientRealtimeInput

// LiveClientRealtimeInput is user input that is sent in real time. Unlike
// LiveClientContent, it can be sent continuously without interrupting model generation
// and is always assumed to be the user's input (it is not appended to conversation
// history).
type LiveClientRealtimeInput struct {
	// Inlined bytes data for media input.
	MediaChunks []*Blob `json:"mediaChunks,omitempty"`
}

User input that is sent in real time. This is different from `ClientContentUpdate` in a few ways:

  • Can be sent continuously without interruption to model generation.
  • If there is a need to mix data interleaved across the `ClientContentUpdate` and the `RealtimeUpdate`, server attempts to optimize for best response, but there are no guarantees.
  • End of turn is not explicitly specified, but is rather derived from user activity (for example, end of speech).
  • Even before the end of turn, the data is processed incrementally to optimize for a fast start of the response from the model.
  • Is always assumed to be the user's input (cannot be used to populate conversation history).

type LiveClientSetup

// LiveClientSetup contains configuration that applies for the duration of the streaming
// session.
type LiveClientSetup struct {
	// The fully qualified name of the publisher model or tuned model endpoint to
	// use.
	Model string `json:"model,omitempty"`
	// The generation configuration for the session.
	// The following fields are supported:
	//   - `response_logprobs`
	//   - `response_mime_type`
	//   - `logprobs`
	//   - `response_schema`
	//   - `stop_sequence`
	//   - `routing_config`
	//   - `audio_timestamp`
	GenerationConfig *GenerationConfig `json:"generationConfig,omitempty"`
	// The user provided system instructions for the model.
	// Note: only text should be used in parts, and content in each part will be
	// in a separate paragraph.
	SystemInstruction *Content `json:"systemInstruction,omitempty"`
	// A list of `Tools` the model may use to generate the next response.
	// A `Tool` is a piece of code that enables the system to interact with
	// external systems to perform an action, or set of actions, outside of
	// knowledge and scope of the model.
	Tools []*Tool `json:"tools,omitempty"`
}

Message contains configuration that will apply for the duration of the streaming session.

type LiveClientToolResponse

// LiveClientToolResponse is a client generated response to a `ToolCall`
// received from the server. Individual `FunctionResponse` objects are matched
// to the respective `FunctionCall` objects by the `id` field.
type LiveClientToolResponse struct {
	// The response to the function calls.
	FunctionResponses []*FunctionResponse `json:"functionResponses,omitempty"`
}

Client generated response to a `ToolCall` received from the server. Individual `FunctionResponse` objects are matched to the respective `FunctionCall` objects by the `id` field. Note that in the unary and server-streaming GenerateContent APIs function calling happens by exchanging the `Content` parts, while in the bidi GenerateContent APIs function calling happens over this dedicated set of messages.

type LiveConnectConfig

// LiveConnectConfig is the session config for the API connection.
type LiveConnectConfig struct {
	// The generation configuration for the session.
	GenerationConfig *GenerationConfig `json:"generationConfig,omitempty"`
	// The requested modalities of the response. Represents the set of
	// modalities that the model can return. Defaults to AUDIO if not specified.
	ResponseModalities []Modality `json:"responseModalities,omitempty"`
	// The speech generation configuration.
	SpeechConfig *SpeechConfig `json:"speechConfig,omitempty"`
	// The user provided system instructions for the model.
	// Note: only text should be used in parts and content in each part will be
	// in a separate paragraph.
	SystemInstruction *Content `json:"systemInstruction,omitempty"`
	// A list of `Tools` the model may use to generate the next response.
	// A `Tool` is a piece of code that enables the system to interact with
	// external systems to perform an action, or set of actions, outside of
	// knowledge and scope of the model.
	Tools []*Tool `json:"tools,omitempty"`
}

Session config for the API connection.

type LiveServerContent

// LiveServerContent is an incremental server update generated by the model in
// response to client messages. Content is generated as quickly as possible,
// not in real time; clients may buffer and play it out in real time.
type LiveServerContent struct {
	// The content that the model has generated as part of the current conversation with
	// the user.
	ModelTurn *Content `json:"modelTurn,omitempty"`
	// If true, indicates that the model is done generating. Generation will only start
	// in response to additional client messages. Can be set alongside `content`, indicating
	// that the `content` is the last in the turn.
	TurnComplete bool `json:"turnComplete,omitempty"`
	// If true, indicates that a client message has interrupted current model generation.
	// If the client is playing out the content in realtime, this is a good signal to stop
	// and empty the current queue.
	Interrupted bool `json:"interrupted,omitempty"`
}

Incremental server update generated by the model in response to client messages. Content is generated as quickly as possible, and not in real time. Clients may choose to buffer and play it out in real time.

type LiveServerMessage

// LiveServerMessage is a response message for the API call. Exactly one of the
// fields is expected to be populated per message.
type LiveServerMessage struct {
	// Sent in response to a `LiveClientSetup` message from the client.
	SetupComplete *LiveServerSetupComplete `json:"setupComplete,omitempty"`
	// Content generated by the model in response to client messages.
	ServerContent *LiveServerContent `json:"serverContent,omitempty"`
	// Request for the client to execute the `function_calls` and return the responses with
	// the matching `id`s.
	ToolCall *LiveServerToolCall `json:"toolCall,omitempty"`
	// Notification for the client that a previously issued `ToolCallMessage` with the specified
	// `id`s should not have been executed and should be cancelled.
	ToolCallCancellation *LiveServerToolCallCancellation `json:"toolCallCancellation,omitempty"`
}

Response message for API call.

type LiveServerSetupComplete

// LiveServerSetupComplete is sent in response to the client's setup message.
// It carries no fields; its presence signals that setup succeeded.
type LiveServerSetupComplete struct {
}

Sent in response to a `LiveGenerateContentSetup` message from the client.

type LiveServerToolCall

// LiveServerToolCall is a request for the client to execute the
// `function_calls` and return the responses with the matching `id`s.
type LiveServerToolCall struct {
	// The function calls to be executed.
	FunctionCalls []*FunctionCall `json:"functionCalls,omitempty"`
}

Request for the client to execute the `function_calls` and return the responses with the matching `id`s.

type LiveServerToolCallCancellation

// LiveServerToolCallCancellation notifies the client that previously issued
// tool calls with the specified `id`s should not have been executed and
// should be cancelled.
type LiveServerToolCallCancellation struct {
	// The IDs of the tool calls to be cancelled.
	IDs []string `json:"ids,omitempty"`
}

Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should not have been executed and should be cancelled. If there were side-effects to those tool calls, clients may attempt to undo the tool calls. This message occurs only in cases where the clients interrupt server turns.

type LogprobsResult

// LogprobsResult holds the log-probability results for a response.
type LogprobsResult struct {
	// Length = total number of decoding steps. The chosen candidates may or may not be
	// in top_candidates.
	ChosenCandidates []*LogprobsResultCandidate `json:"chosenCandidates,omitempty"`
	// Length = total number of decoding steps.
	TopCandidates []*LogprobsResultTopCandidates `json:"topCandidates,omitempty"`
}

Logprobs Result

type LogprobsResultCandidate

// LogprobsResultCandidate is a candidate for the logprobs token and score.
type LogprobsResultCandidate struct {
	// The candidate's log probability. If nil, then no LogProbability is returned by the
	// API.
	LogProbability *float32 `json:"logProbability,omitempty"`
	// The candidate's token string value.
	Token string `json:"token,omitempty"`
	// The candidate's token ID value. If nil, then no TokenID is returned by the API.
	TokenID *int32 `json:"tokenId,omitempty"`
}

Candidate for the logprobs token and score.

type LogprobsResultTopCandidates

// LogprobsResultTopCandidates holds the candidates with top log probabilities
// at each decoding step.
type LogprobsResultTopCandidates struct {
	// Sorted by log probability in descending order.
	Candidates []*LogprobsResultCandidate `json:"candidates,omitempty"`
}

Candidates with top log probabilities at each decoding step.

type MaskReferenceConfig

// MaskReferenceConfig is the configuration for a mask reference image.
type MaskReferenceConfig struct {
	// Prompts the model to generate a mask instead of you needing to
	// provide one (unless MASK_MODE_USER_PROVIDED is used).
	MaskMode MaskReferenceMode `json:"maskMode,omitempty"`
	// A list of up to 5 class IDs to use for semantic segmentation.
	// Automatically creates an image mask based on specific objects.
	SegmentationClasses []int32 `json:"segmentationClasses,omitempty"`
	// Dilation percentage of the mask provided. Float between 0 and 1. If nil, then API
	// will determine the default value.
	MaskDilation *float32 `json:"maskDilation,omitempty"`
}

Configuration for a Mask reference image.

type MaskReferenceImage

// MaskReferenceImage is an image whose non-zero values indicate where to edit
// the base image. It holds either a user-provided mask (plus its config) or
// only config parameters for the model to generate a mask.
type MaskReferenceImage struct {
	// The reference image for the editing operation.
	ReferenceImage *Image `json:"referenceImage,omitempty"`
	// The ID of the reference image.
	ReferenceID int32 `json:"referenceId,omitempty"`

	// Configuration for the mask reference image.
	Config *MaskReferenceConfig `json:"config,omitempty"`
	// contains filtered or unexported fields
}

MaskReferenceImage is an image whose non-zero values indicate where to edit the base image. If the user provides a mask image, the mask must be in the same dimensions as the raw image. MaskReferenceImage encapsulates either a mask image provided by the user and configs for the user provided mask, or only config parameters for the model to generate a mask.

func NewMaskReferenceImage added in v0.5.0

func NewMaskReferenceImage(referenceImage *Image, referenceID int32, config *MaskReferenceConfig) *MaskReferenceImage

NewMaskReferenceImage creates a new MaskReferenceImage.

type MaskReferenceMode

// MaskReferenceMode is the mask mode of a mask reference image.
type MaskReferenceMode string

Enum representing the mask mode of a mask reference image.

// Valid MaskReferenceMode values.
const (
	MaskReferenceModeMaskModeDefault      MaskReferenceMode = "MASK_MODE_DEFAULT"
	MaskReferenceModeMaskModeUserProvided MaskReferenceMode = "MASK_MODE_USER_PROVIDED"
	MaskReferenceModeMaskModeBackground   MaskReferenceMode = "MASK_MODE_BACKGROUND"
	MaskReferenceModeMaskModeForeground   MaskReferenceMode = "MASK_MODE_FOREGROUND"
	MaskReferenceModeMaskModeSemantic     MaskReferenceMode = "MASK_MODE_SEMANTIC"
)

type MediaResolution

// MediaResolution is the media resolution to use.
type MediaResolution string

The media resolution to use.

const (
	// Media resolution has not been set.
	MediaResolutionUnspecified MediaResolution = "MEDIA_RESOLUTION_UNSPECIFIED"
	// Media resolution set to low (64 tokens).
	MediaResolutionLow MediaResolution = "MEDIA_RESOLUTION_LOW"
	// Media resolution set to medium (256 tokens).
	MediaResolutionMedium MediaResolution = "MEDIA_RESOLUTION_MEDIUM"
	// Media resolution set to high (zoomed reframing with 256 tokens).
	MediaResolutionHigh MediaResolution = "MEDIA_RESOLUTION_HIGH"
)

type Modality added in v0.1.0

// Modality denotes a server content modality.
type Modality string

Server content modalities.

const (
	// The modality is unspecified.
	ModalityUnspecified Modality = "MODALITY_UNSPECIFIED"
	// Indicates the model should return text.
	ModalityText Modality = "TEXT"
	// Indicates the model should return images.
	ModalityImage Modality = "IMAGE"
	// Indicates the model should return audio.
	ModalityAudio Modality = "AUDIO"
)

type Mode

// Mode is the mode of the predictor to be used in dynamic retrieval.
type Mode string

The mode of the predictor to be used in dynamic retrieval.

// Valid Mode values for dynamic retrieval.
const (
	// Always trigger retrieval.
	ModeUnspecified Mode = "MODE_UNSPECIFIED"
	// Run retrieval only when system decides it is necessary.
	ModeDynamic Mode = "MODE_DYNAMIC"
)

type Model added in v0.4.0

// Model is a trained machine learning model.
type Model struct {
	// Resource name of the model.
	Name string `json:"name,omitempty"`
	// Display name of the model.
	DisplayName string `json:"displayName,omitempty"`
	// Description of the model.
	Description string `json:"description,omitempty"`
	// Version ID of the model. A new version is committed when a new
	// model version is uploaded or trained under an existing model ID. The
	// version ID is an auto-incrementing decimal number in string
	// representation.
	Version string `json:"version,omitempty"`
	// List of deployed models created from this base model. Note that a
	// model could have been deployed to endpoints in different locations.
	Endpoints []*Endpoint `json:"endpoints,omitempty"`
	// Labels with user-defined metadata to organize your models.
	Labels map[string]string `json:"labels,omitempty"`
	// Information about the tuned model from the base model.
	TunedModelInfo *TunedModelInfo `json:"tunedModelInfo,omitempty"`
	// The maximum number of input tokens that the model can handle.
	InputTokenLimit int32 `json:"inputTokenLimit,omitempty"`
	// The maximum number of output tokens that the model can generate.
	OutputTokenLimit int32 `json:"outputTokenLimit,omitempty"`
	// List of actions that are supported by the model.
	SupportedActions []string `json:"supportedActions,omitempty"`
}

A trained machine learning model.

type Models

// Models provides model operations (GenerateContent, CountTokens, EmbedContent,
// etc.). Obtain an instance via Client.Models rather than constructing one.
type Models struct {
	// contains filtered or unexported fields
}

func (Models) All added in v0.5.0

func (m Models) All(ctx context.Context) iter.Seq2[*Model, error]

func (Models) ComputeTokens added in v0.1.0

func (m Models) ComputeTokens(ctx context.Context, model string, contents []*Content, config *ComputeTokensConfig) (*ComputeTokensResponse, error)

func (Models) CountTokens added in v0.1.0

func (m Models) CountTokens(ctx context.Context, model string, contents []*Content, config *CountTokensConfig) (*CountTokensResponse, error)

func (Models) Delete added in v0.4.0

func (m Models) Delete(ctx context.Context, model string, config *DeleteModelConfig) (*DeleteModelResponse, error)

func (Models) EditImage added in v0.5.0

func (m Models) EditImage(ctx context.Context, model, prompt string, referenceImages []ReferenceImage, config *EditImageConfig) (*EditImageResponse, error)

EditImage calls the EditImage method on the model.

func (Models) EmbedContent added in v0.5.0

func (m Models) EmbedContent(ctx context.Context, model string, contents []*Content, config *EmbedContentConfig) (*EmbedContentResponse, error)

func (Models) GenerateContent

func (m Models) GenerateContent(ctx context.Context, model string, contents []*Content, config *GenerateContentConfig) (*GenerateContentResponse, error)

GenerateContent calls the GenerateContent method on the model.

Example (CodeExecution_geminiapi)

This example shows how to call the GenerateContent method with code execution to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	// Create a Gemini API client authenticated with an API key.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx,
		"gemini-2.0-flash-exp",
		genai.Text("What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
		&genai.GenerateContentConfig{
			Tools: []*genai.Tool{
				{CodeExecution: &genai.ToolCodeExecution{}},
			},
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (CodeExecution_vertexai)

This example shows how to call the GenerateContent method with code execution to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	// Create a Vertex AI client for the configured project and location.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx,
		"gemini-2.0-flash-exp",
		genai.Text("What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
		&genai.GenerateContentConfig{
			Tools: []*genai.Tool{
				{CodeExecution: &genai.ToolCodeExecution{}},
			},
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (Config_geminiapi)

This example shows how to call the GenerateContent method with GenerateContentConfig to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	// Create a Gemini API client authenticated with an API key.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method with explicit sampling/output settings.
	result, err := client.Models.GenerateContent(ctx,
		"gemini-2.0-flash-exp",
		genai.Text("Tell me about New York?"),
		&genai.GenerateContentConfig{
			Temperature:      genai.Ptr[float32](0.5),
			TopP:             genai.Ptr[float32](0.5),
			TopK:             genai.Ptr[float32](2.0),
			ResponseMIMEType: "application/json",
			StopSequences:    []string{"\n"},
			CandidateCount:   genai.Ptr[int32](2),
			Seed:             genai.Ptr[int32](42),
			MaxOutputTokens:  genai.Ptr[int32](128),
			PresencePenalty:  genai.Ptr[float32](0.5),
			FrequencyPenalty: genai.Ptr[float32](0.5),
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (Config_vertexai)

This example shows how to call the GenerateContent method with GenerateContentConfig to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	// Create a Vertex AI client for the configured project and location.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method with explicit sampling/output settings.
	result, err := client.Models.GenerateContent(ctx,
		"gemini-2.0-flash-exp",
		genai.Text("Tell me about New York?"),
		&genai.GenerateContentConfig{
			Temperature:      genai.Ptr[float32](0.5),
			TopP:             genai.Ptr[float32](0.5),
			TopK:             genai.Ptr[float32](2.0),
			ResponseMIMEType: "application/json",
			StopSequences:    []string{"\n"},
			CandidateCount:   genai.Ptr[int32](2),
			Seed:             genai.Ptr[int32](42),
			MaxOutputTokens:  genai.Ptr[int32](128),
			PresencePenalty:  genai.Ptr[float32](0.5),
			FrequencyPenalty: genai.Ptr[float32](0.5),
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (GcsURI_vertexai)

This example shows how to call the GenerateContent method with GCS URI to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	// Create a Vertex AI client for the configured project and location.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method, referencing a video by its GCS URI
	// instead of inlining the bytes.
	parts := []*genai.Part{
		{Text: "What's this video about?"},
		{FileData: &genai.FileData{FileURI: "gs://cloud-samples-data/video/animals.mp4", MIMEType: "video/mp4"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (GoogleSearchRetrieval_geminiapi)

This example shows how to call the GenerateContent method with Google Search Retrieval to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	// Create a Gemini API client authenticated with an API key.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method with the Google Search Retrieval tool.
	result, err := client.Models.GenerateContent(ctx,
		"gemini-2.0-flash-exp",
		genai.Text("Tell me about New York?"),
		&genai.GenerateContentConfig{
			Tools: []*genai.Tool{
				{GoogleSearchRetrieval: &genai.GoogleSearchRetrieval{}},
			},
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (GoogleSearchRetrieval_vertexai)

This example shows how to call the GenerateContent method with Google Search Retrieval to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	// Create a Vertex AI client for the configured project and location.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method with the Google Search Retrieval tool.
	parts := []*genai.Part{{Text: "Tell me about New York?"}}
	contents := []*genai.Content{{Parts: parts}}

	result, err := client.Models.GenerateContent(ctx,
		"gemini-2.0-flash-exp",
		contents,
		&genai.GenerateContentConfig{
			Tools: []*genai.Tool{
				{GoogleSearchRetrieval: &genai.GoogleSearchRetrieval{}},
			},
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (HttpURL_vertexai)

This example shows how to call the GenerateContent method with HTTP URL to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method.
	parts := []*genai.Part{
		{Text: "What's this picture about?"},
		{FileData: &genai.FileData{FileURI: "https://storage.googleapis.com/cloud-samples-data/generative-ai/image/scones.jpg", MIMEType: "image/jpeg"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (InlineAudio_geminiapi)

This example shows how to call the GenerateContent method with an inline audio file to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	resp, err := http.Get("your audio url")
	if err != nil {
		fmt.Println("Error fetching image:", err)
		return
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "What's this music about?"},
		{InlineData: &genai.Blob{Data: data, MIMEType: "audio/mp3"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (InlineAudio_vertexai)

This example shows how to call the GenerateContent method with an inline audio file to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the audio file.
	resp, err := http.Get("your audio url")
	if err != nil {
		fmt.Println("Error fetching image:", err)
		return
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "What's this music about?"},
		{InlineData: &genai.Blob{Data: data, MIMEType: "audio/mp3"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (InlineImage_geminiapi)

This example shows how to call the GenerateContent method with inline image to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	// Create a Gemini API client authenticated with an API key.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the image data from a url.
	resp, err := http.Get("https://storage.googleapis.com/cloud-samples-data/generative-ai/image/scones.jpg")
	if err != nil {
		fmt.Println("Error fetching image:", err)
		return
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "What's this image about?"},
		{InlineData: &genai.Blob{Data: data, MIMEType: "image/jpeg"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (InlineImage_vertexai)

This example shows how to call the GenerateContent method with inline image to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	// Create a Vertex AI client for the configured project and location.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the image data from a url.
	resp, err := http.Get("https://storage.googleapis.com/cloud-samples-data/generative-ai/image/scones.jpg")
	if err != nil {
		fmt.Println("Error fetching image:", err)
		return
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "What's this image about?"},
		{InlineData: &genai.Blob{Data: data, MIMEType: "image/jpeg"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (InlinePDF_geminiapi)

This example shows how to call the GenerateContent method with an inline PDF file to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the pdf file.
	resp, err := http.Get("your pdf url")
	if err != nil {
		fmt.Println("Error fetching image:", err)
		return
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "What's this pdf about?"},
		{InlineData: &genai.Blob{Data: data, MIMEType: "application/pdf"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (InlinePDF_vertexai)

This example shows how to call the GenerateContent method with an inline PDF file to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the pdf file.
	resp, err := http.Get("your pdf url")
	if err != nil {
		fmt.Println("Error fetching image:", err)
		return
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "What's this pdf about?"},
		{InlineData: &genai.Blob{Data: data, MIMEType: "application/pdf"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (InlineVideo_geminiapi)

This example shows how to call the GenerateContent method with an inline video file to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the video file.
	resp, err := http.Get("your video url")
	if err != nil {
		fmt.Println("Error fetching image:", err)
		return
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "What's this video about?"},
		{InlineData: &genai.Blob{Data: data, MIMEType: "video/mp4"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (InlineVideo_vertexai)

This example shows how to call the GenerateContent method with an inline video file to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the video file.
	resp, err := http.Get("your video url")
	if err != nil {
		fmt.Println("Error fetching image:", err)
		return
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "What's this video about?"},
		{InlineData: &genai.Blob{Data: data, MIMEType: "video/mp4"}},
	}
	contents := []*genai.Content{{Parts: parts}}

	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (SystemInstruction_geminiapi)

This example shows how to call the GenerateContent method with system instruction to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	// Create a Gemini API client authenticated with an API key.
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method with a system instruction.
	result, err := client.Models.GenerateContent(ctx,
		"gemini-2.0-flash-exp",
		genai.Text("Tell me about New York?"),
		&genai.GenerateContentConfig{
			SystemInstruction: &genai.Content{Parts: []*genai.Part{{Text: "You are a helpful assistant."}}},
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

// debugPrint pretty-prints the response as indented JSON.
func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (SystemInstruction_vertexai)

This example shows how to call the GenerateContent method with system instruction to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx,
		"gemini-2.0-flash-exp",
		genai.Text("Tell me about New York?"),
		&genai.GenerateContentConfig{
			SystemInstruction: &genai.Content{Parts: []*genai.Part{{Text: "You are a helpful assistant."}}},
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (Text_geminiapi)

This example shows how to call the GenerateContent method with a simple text to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", genai.Text("Tell me about New York?"), nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (Text_vertexai)

This example shows how to call the GenerateContent method with a simple text to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", genai.Text("Tell me about New York?"), nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (Texts_geminiapi)

This example shows how to call the GenerateContent method with multiple texts to Gemini API.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "Tell me about New York?"},
		{Text: "And how about San Francisco?"},
	}
	contents := []*genai.Content{{Parts: parts}}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (Texts_vertexai)

This example shows how to call the GenerateContent method with multiple texts to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	parts := []*genai.Part{
		{Text: "Tell me about New York?"},
		{Text: "And how about San Francisco?"},
	}
	contents := []*genai.Content{{Parts: parts}}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash-exp", contents, nil)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

Example (ThirdPartyModel_vertexai)

This example shows how to call the GenerateContent method with third party model to Vertex AI.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContent method.
	result, err := client.Models.GenerateContent(ctx,
		"meta/llama-3.2-90b-vision-instruct-maas",
		genai.Text("Tell me about New York?"),
		nil,
	)
	if err != nil {
		log.Fatal(err)
	}
	debugPrint(result)
}

func debugPrint[T any](r *T) {

	response, err := json.MarshalIndent(*r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(response))
}
Output:

func (Models) GenerateContentStream

func (m Models) GenerateContentStream(ctx context.Context, model string, contents []*Content, config *GenerateContentConfig) iter.Seq2[*GenerateContentResponse, error]

GenerateContentStream calls the GenerateContentStream method on the model.

Example (Text_geminiapi)

This example shows how to call the GenerateContentStream method with a simple text to Gemini API.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your Google API key
const apiKey = "your-api-key"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		APIKey:  apiKey,
		Backend: genai.BackendGeminiAPI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContentStream method.
	for result, err := range client.Models.GenerateContentStream(
		ctx,
		"gemini-2.0-flash-exp",
		genai.Text("Give me top 3 indoor kids friendly ideas."),
		nil,
	) {
		if err != nil {
			log.Fatal(err)
		}
		fmt.Print(result.Candidates[0].Content.Parts[0].Text)
	}
}
Output:

Example (Text_vertexai)

This example shows how to call the GenerateContentStream method with a simple text to Vertex AI.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/genai"
)

// Your GCP project
const project = "your-project"

// A GCP location like "us-central1"
const location = "some-gcp-location"

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  project,
		Location: location,
		Backend:  genai.BackendVertexAI,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Call the GenerateContentStream method.
	for result, err := range client.Models.GenerateContentStream(
		ctx,
		"gemini-2.0-flash-exp",
		genai.Text("Give me top 3 indoor kids friendly ideas."),
		nil,
	) {
		if err != nil {
			log.Fatal(err)
		}
		fmt.Print(result.Candidates[0].Content.Parts[0].Text)
	}
}
Output:

func (Models) GenerateImages added in v0.1.0

func (m Models) GenerateImages(ctx context.Context, model string, prompt string, config *GenerateImagesConfig) (*GenerateImagesResponse, error)

func (Models) Get added in v0.4.0

func (m Models) Get(ctx context.Context, model string, config *GetModelConfig) (*Model, error)

func (Models) List added in v0.5.0

func (m Models) List(ctx context.Context, config *ListModelsConfig) (Page[Model], error)

func (Models) Update added in v0.4.0

func (m Models) Update(ctx context.Context, model string, config *UpdateModelConfig) (*Model, error)

func (Models) UpscaleImage added in v0.4.0

func (m Models) UpscaleImage(ctx context.Context, model string, image *Image, upscaleFactor string, config *UpscaleImageConfig) (*UpscaleImageResponse, error)

UpscaleImage calls the upscaleImage method on the model.

type Outcome

type Outcome string

Outcome of the code execution.

const (
	// Unspecified status. This value should not be used.
	OutcomeUnspecified Outcome = "OUTCOME_UNSPECIFIED"
	// Code execution completed successfully.
	OutcomeOK Outcome = "OUTCOME_OK"
	// Code execution finished but with a failure. `stderr` should contain the reason.
	OutcomeFailed Outcome = "OUTCOME_FAILED"
	// Code execution ran for too long, and was cancelled. There may or may not be a partial
	// output present.
	OutcomeDeadlineExceeded Outcome = "OUTCOME_DEADLINE_EXCEEDED"
)

type Page added in v0.4.0

type Page[T any] struct {
	Name          string // The name of the resource.
	Items         []*T   // The items in the current page.
	NextPageToken string // The token to use to retrieve the next page of results.
	// contains filtered or unexported fields
}

Page represents a page of results from a paginated API call. It contains a slice of items and information about the next page.

func (Page[T]) Next added in v0.4.0

func (p Page[T]) Next(ctx context.Context) (Page[T], error)

Next retrieves the next page of results.

If there are no more pages, ErrPageDone is returned. Otherwise, a new Page struct containing the next set of results is returned. Any other errors encountered during retrieval will also be returned.

type Part

type Part struct {
	// Metadata for a given video.
	VideoMetadata *VideoMetadata `json:"videoMetadata,omitempty"`
	// Indicates if the part is thought from the model.
	Thought bool `json:"thought,omitempty"`
	// Optional. Result of executing the [ExecutableCode].
	CodeExecutionResult *CodeExecutionResult `json:"codeExecutionResult,omitempty"`
	// Optional. Code generated by the model that is meant to be executed.
	ExecutableCode *ExecutableCode `json:"executableCode,omitempty"`
	// Optional. URI based data.
	FileData *FileData `json:"fileData,omitempty"`
	// Optional. A predicted [FunctionCall] returned from the model that contains a string
	// representing the [FunctionDeclaration.name] with the parameters and their values.
	FunctionCall *FunctionCall `json:"functionCall,omitempty"`
	// Optional. The result output of a [FunctionCall] that contains a string representing
	// the [FunctionDeclaration.name] and a structured JSON object containing any output
	// from the function call. It is used as context to the model.
	FunctionResponse *FunctionResponse `json:"functionResponse,omitempty"`
	// Optional. Inlined bytes data.
	InlineData *Blob `json:"inlineData,omitempty"`
	// Optional. Text part (can be code).
	Text string `json:"text,omitempty"`
}

A datatype containing media content. Exactly one field within a Part should be set, representing the specific type of content being conveyed. Using multiple fields within the same `Part` instance is considered invalid.

func NewPartFromBytes added in v0.1.0

func NewPartFromBytes(data []byte, mimeType string) *Part

NewPartFromBytes builds a Part from a given byte array and mime type.

func NewPartFromCodeExecutionResult added in v0.1.0

func NewPartFromCodeExecutionResult(outcome Outcome, output string) *Part

NewPartFromCodeExecutionResult builds a Part from a given outcome and output.

func NewPartFromExecutableCode added in v0.1.0

func NewPartFromExecutableCode(code string, language Language) *Part

NewPartFromExecutableCode builds a Part from a given executable code and language.

func NewPartFromFunctionCall added in v0.1.0

func NewPartFromFunctionCall(name string, args map[string]any) *Part

NewPartFromFunctionCall builds a Part from a given function call.

func NewPartFromFunctionResponse added in v0.1.0

func NewPartFromFunctionResponse(name string, response map[string]any) *Part

NewPartFromFunctionResponse builds a Part from a given function response.

func NewPartFromText added in v0.1.0

func NewPartFromText(text string) *Part

NewPartFromText builds a Part from a given text.

func NewPartFromURI added in v0.1.0

func NewPartFromURI(fileURI, mimeType string) *Part

NewPartFromURI builds a Part from a given file URI and mime type.

func NewPartFromVideoMetadata added in v0.1.0

func NewPartFromVideoMetadata(endOffset, startOffset string) *Part

NewPartFromVideoMetadata builds a Part from a given end offset and start offset.

type PersonGeneration added in v0.1.0

type PersonGeneration string

Enum that controls the generation of people.

const (
	PersonGenerationDontAllow  PersonGeneration = "DONT_ALLOW"
	PersonGenerationAllowAdult PersonGeneration = "ALLOW_ADULT"
	PersonGenerationAllowAll   PersonGeneration = "ALLOW_ALL"
)

type PrebuiltVoiceConfig

type PrebuiltVoiceConfig struct {
	// The name of the prebuilt voice to use.
	VoiceName string `json:"voiceName,omitempty"`
}

The configuration for the prebuilt speaker to use.

type RawReferenceImage

type RawReferenceImage struct {
	// The reference image for the editing operation.
	ReferenceImage *Image `json:"referenceImage,omitempty"`
	// The ID of the reference image.
	ReferenceID int32 `json:"referenceId,omitempty"`
	// contains filtered or unexported fields
}

A raw reference image. A raw reference image represents the base image to edit, provided by the user. It can optionally be provided in addition to a mask reference image or a style reference image.

func NewRawReferenceImage added in v0.5.0

func NewRawReferenceImage(referenceImage *Image, referenceID int32) *RawReferenceImage

NewRawReferenceImage creates a new RawReferenceImage.

type ReferenceImage added in v0.5.0

type ReferenceImage interface {
	// contains filtered or unexported methods
}

ReferenceImage is an interface that represents a generic reference image.

You can create instances that implement this interface using the following constructor functions:

  • NewRawReferenceImage
  • NewMaskReferenceImage
  • NewControlReferenceImage
  • NewStyleReferenceImage
  • NewSubjectReferenceImage
  • ...

type Retrieval

type Retrieval struct {
	// Optional. Deprecated. This option is no longer supported.
	DisableAttribution bool `json:"disableAttribution,omitempty"`
	// Set to use data source powered by Vertex AI Search.
	VertexAISearch *VertexAISearch `json:"vertexAiSearch,omitempty"`
	// Set to use data source powered by Vertex RAG store. User data is uploaded via the
	// VertexRAGDataService.
	VertexRAGStore *VertexRAGStore `json:"vertexRagStore,omitempty"`
}

Defines a retrieval tool that model can call to access external knowledge.

type RetrievalMetadata

type RetrievalMetadata struct {
	// Optional. Score indicating how likely information from Google Search could help answer
	// the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1
	// is the most likely. This score is only populated when Google Search grounding and
	// dynamic retrieval is enabled. It will be compared to the threshold to determine whether
	// to trigger Google Search. If nil, then API will determine the default value.
	GoogleSearchDynamicRetrievalScore *float32 `json:"googleSearchDynamicRetrievalScore,omitempty"`
}

Metadata related to retrieval in the grounding flow.

type SafetyFilterLevel added in v0.1.0

type SafetyFilterLevel string

Enum that controls the safety filter level for objectionable content.

const (
	SafetyFilterLevelBlockLowAndAbove    SafetyFilterLevel = "BLOCK_LOW_AND_ABOVE"
	SafetyFilterLevelBlockMediumAndAbove SafetyFilterLevel = "BLOCK_MEDIUM_AND_ABOVE"
	SafetyFilterLevelBlockOnlyHigh       SafetyFilterLevel = "BLOCK_ONLY_HIGH"
	SafetyFilterLevelBlockNone           SafetyFilterLevel = "BLOCK_NONE"
)

type SafetyRating

type SafetyRating struct {
	// Output only. Indicates whether the content was filtered out because of this rating.
	Blocked bool `json:"blocked,omitempty"`
	// Output only. Harm category.
	Category HarmCategory `json:"category,omitempty"`
	// Output only. Harm probability levels in the content.
	Probability HarmProbability `json:"probability,omitempty"`
	// Output only. Harm probability score. If nil, then no ProbabilityScore is returned
	// by the API.
	ProbabilityScore *float32 `json:"probabilityScore,omitempty"`
	// Output only. Harm severity levels in the content.
	Severity HarmSeverity `json:"severity,omitempty"`
	// Output only. Harm severity score. If nil, then no SeverityScore is returned by
	// the API.
	SeverityScore *float32 `json:"severityScore,omitempty"`
}

Safety rating corresponding to the generated content.

type SafetySetting

type SafetySetting struct {
	// Determines if the harm block method uses probability or probability
	// and severity scores.
	Method HarmBlockMethod `json:"method,omitempty"`
	// Required. Harm category.
	Category HarmCategory `json:"category,omitempty"`
	// Required. The harm block threshold.
	Threshold HarmBlockThreshold `json:"threshold,omitempty"`
}

Safety settings.

type Schema

type Schema struct {
	// Optional. Example of the object. Will only be populated when the object is the root.
	Example any `json:"example,omitempty"`
	// Optional. Pattern of the Type.STRING to restrict a string to a regular expression.
	Pattern string `json:"pattern,omitempty"`
	// Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER
	// and Type.NUMBER
	Minimum *float64 `json:"minimum,omitempty"`
	// Optional. Default value of the data.
	Default any `json:"default,omitempty"`
	// Optional. The value should be validated against any (one or more) of the subschemas
	// in the list.
	AnyOf []*Schema `json:"anyOf,omitempty"`
	// Optional. Maximum length of the Type.STRING
	MaxLength *int64 `json:"maxLength,omitempty"`
	// Optional. The title of the Schema.
	Title string `json:"title,omitempty"`
	// Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING
	MinLength *int64 `json:"minLength,omitempty"`
	// Optional. Minimum number of the properties for Type.OBJECT.
	MinProperties *int64 `json:"minProperties,omitempty"`
	// Optional. Maximum value of the Type.INTEGER and Type.NUMBER
	Maximum *float64 `json:"maximum,omitempty"`
	// Optional. Maximum number of the properties for Type.OBJECT.
	MaxProperties *int64 `json:"maxProperties,omitempty"`
	// Optional. The description of the data.
	Description string `json:"description,omitempty"`
	// Optional. Possible values of the element of primitive type with enum format. Examples:
	// 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH",
	// "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum,
	// enum:["101", "201", "301"]}
	Enum []string `json:"enum,omitempty"`
	// Optional. The format of the data. Supported formats: for NUMBER type: "float", "double"
	// for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc
	Format string `json:"format,omitempty"`
	// Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY.
	Items *Schema `json:"items,omitempty"`
	// Optional. Maximum number of the elements for Type.ARRAY.
	MaxItems *int64 `json:"maxItems,omitempty"`
	// Optional. Minimum number of the elements for Type.ARRAY.
	MinItems *int64 `json:"minItems,omitempty"`
	// Optional. Indicates if the value may be null.
	Nullable bool `json:"nullable,omitempty"`
	// Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT.
	Properties map[string]*Schema `json:"properties,omitempty"`
	// Optional. The order of the properties. Not a standard field in open API spec. Only
	// used to support the order of the properties.
	PropertyOrdering []string `json:"propertyOrdering,omitempty"`
	// Optional. Required properties of Type.OBJECT.
	Required []string `json:"required,omitempty"`
	// Optional. The type of the data.
	Type Type `json:"type,omitempty"`
}

Schema that defines the format of input and output data. Represents a select subset of an OpenAPI 3.0 schema object. You can find more details and examples at https://spec.openapis.org/oas/v3.0.3.html#schema-object

type SearchEntryPoint

type SearchEntryPoint struct {
	// Optional. Web content snippet that can be embedded in a web page or an app webview.
	RenderedContent string `json:"renderedContent,omitempty"`
	// Optional. Base64-encoded JSON representing an array of tuples.
	SDKBlob []byte `json:"sdkBlob,omitempty"`
}

Google search entry point.

type Segment

type Segment struct {
	// Output only. End index in the given Part, measured in bytes. Offset from the start
	// of the Part, exclusive, starting at zero.
	EndIndex int32 `json:"endIndex,omitempty"`
	// Output only. The index of a Part object within its parent Content object.
	PartIndex int32 `json:"partIndex,omitempty"`
	// Output only. Start index in the given Part, measured in bytes. Offset from the start
	// of the Part, inclusive, starting at zero.
	StartIndex int32 `json:"startIndex,omitempty"`
	// Output only. The text corresponding to the segment from the response.
	Text string `json:"text,omitempty"`
}

Segment of the content.

type ServerError

type ServerError struct {
	// contains filtered or unexported fields
}

ServerError is an error that occurs when the GenAI API encounters an unexpected server problem.

func (ServerError) Error

func (e ServerError) Error() string

Error returns a string representation of the ServerError.

type Session

type Session struct {
	// contains filtered or unexported fields
}

Session is a realtime connection to the API. The live module is experimental.

func (*Session) Close

func (s *Session) Close()

Close terminates the connection. The live module is experimental.

func (*Session) Receive

func (s *Session) Receive() (*LiveServerMessage, error)

Receive reads a LiveServerMessage from the connection. It returns the received message or an error if reading or unmarshalling fails. The live module is experimental.

func (*Session) Send

func (s *Session) Send(input *LiveClientMessage) error

Send transmits a LiveClientMessage over the established connection. It returns an error if sending the message fails. The live module is experimental.

type SpeechConfig

type SpeechConfig struct {
	// The configuration for the speaker to use.
	VoiceConfig *VoiceConfig `json:"voiceConfig,omitempty"`
}

The speech generation configuration.

type StyleReferenceConfig

type StyleReferenceConfig struct {
	// A text description of the style to use for the generated image.
	StyleDescription string `json:"styleDescription,omitempty"`
}

Configuration for a Style reference image.

type StyleReferenceImage

type StyleReferenceImage struct {
	// The reference image for the editing operation.
	ReferenceImage *Image `json:"referenceImage,omitempty"`
	// The ID of the reference image.
	ReferenceID int32 `json:"referenceId,omitempty"`

	// Configuration for the style reference image.
	Config *StyleReferenceConfig `json:"config,omitempty"`
	// contains filtered or unexported fields
}

A style reference image. This encapsulates a style reference image provided by the user, and additionally optional config parameters for the style reference image. A raw reference image can also be provided as a destination for the style to be applied to.

func NewStyleReferenceImage added in v0.5.0

func NewStyleReferenceImage(referenceImage *Image, referenceID int32, config *StyleReferenceConfig) *StyleReferenceImage

NewStyleReferenceImage creates a new StyleReferenceImage.

type SubjectReferenceConfig

type SubjectReferenceConfig struct {
	// The subject type of a subject reference image.
	SubjectType SubjectReferenceType `json:"subjectType,omitempty"`
	// Subject description for the image.
	SubjectDescription string `json:"subjectDescription,omitempty"`
}

Configuration for a Subject reference image.

type SubjectReferenceImage

type SubjectReferenceImage struct {
	// The reference image for the editing operation.
	ReferenceImage *Image `json:"referenceImage,omitempty"`
	// The ID of the reference image.
	ReferenceID int32 `json:"referenceId,omitempty"`

	// Configuration for the subject reference image.
	Config *SubjectReferenceConfig `json:"config,omitempty"`
	// contains filtered or unexported fields
}

A subject reference image. This encapsulates a subject reference image provided by the user, and additionally optional config parameters for the subject reference image. A raw reference image can also be provided as a destination for the subject to be applied to.

func NewSubjectReferenceImage added in v0.5.0

func NewSubjectReferenceImage(referenceImage *Image, referenceID int32, config *SubjectReferenceConfig) *SubjectReferenceImage

NewSubjectReferenceImage creates a new SubjectReferenceImage.

type SubjectReferenceType

type SubjectReferenceType string

Enum representing the subject type of a subject reference image.

const (
	SubjectReferenceTypeSubjectTypeDefault SubjectReferenceType = "SUBJECT_TYPE_DEFAULT"
	SubjectReferenceTypeSubjectTypePerson  SubjectReferenceType = "SUBJECT_TYPE_PERSON"
	SubjectReferenceTypeSubjectTypeAnimal  SubjectReferenceType = "SUBJECT_TYPE_ANIMAL"
	SubjectReferenceTypeSubjectTypeProduct SubjectReferenceType = "SUBJECT_TYPE_PRODUCT"
)

type ThinkingConfig added in v0.1.0

type ThinkingConfig struct {
	// Indicates whether to include thoughts in the response. If true, thoughts are returned
	// only if the model supports thought and thoughts are available.
	IncludeThoughts bool `json:"includeThoughts,omitempty"`
}

The thinking features configuration.

type TokensInfo added in v0.1.0

type TokensInfo struct {
	// Optional. Optional fields for the role from the corresponding Content.
	Role string `json:"role,omitempty"`
	// A list of token IDs from the input.
	TokenIDs []int64 `json:"tokenIds,omitempty"`
	// A list of tokens from the input.
	Tokens [][]byte `json:"tokens,omitempty"`
}

Tokens info with a list of tokens and the corresponding list of token ids.

func (*TokensInfo) UnmarshalJSON added in v0.1.0

func (ti *TokensInfo) UnmarshalJSON(data []byte) error

type Tool

type Tool struct {
	// List of function declarations that the tool supports.
	FunctionDeclarations []*FunctionDeclaration `json:"functionDeclarations,omitempty"`
	// Optional. Retrieval tool type. System will always execute the provided retrieval
	// tool(s) to get external knowledge to answer the prompt. Retrieval results are presented
	// to the model for generation.
	Retrieval *Retrieval `json:"retrieval,omitempty"`
	// Optional. Google Search tool type. Specialized retrieval tool
	// that is powered by Google Search.
	GoogleSearch *GoogleSearch `json:"googleSearch,omitempty"`
	// Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered
	// by Google search.
	GoogleSearchRetrieval *GoogleSearchRetrieval `json:"googleSearchRetrieval,omitempty"`
	// Optional. CodeExecution tool type. Enables the model to execute code as part of generation.
	// This field is only used by the Gemini Developer API services.
	CodeExecution *ToolCodeExecution `json:"codeExecution,omitempty"`
}

Tool details of a tool that the model may use to generate a response.

type ToolCodeExecution

type ToolCodeExecution struct {
}

Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode] and [CodeExecutionResult], which are input and output to this tool.

type ToolConfig

type ToolConfig struct {
	// Optional. Function calling config.
	FunctionCallingConfig *FunctionCallingConfig `json:"functionCallingConfig,omitempty"`
}

Tool config. This config is shared for all tools provided in the request.

type TunedModelInfo added in v0.4.0

type TunedModelInfo struct {
	// ID of the base model that you want to tune.
	BaseModel string `json:"baseModel,omitempty"`
	// Date and time when the base model was created.
	CreateTime *time.Time `json:"createTime,omitempty"`
	// Date and time when the base model was last updated.
	UpdateTime *time.Time `json:"updateTime,omitempty"`
}

A tuned machine learning model.

type Type

type Type string

The type of the data.

const (
	// Not specified, should not be used.
	TypeUnspecified Type = "TYPE_UNSPECIFIED"
	// OpenAPI string type
	TypeString Type = "STRING"
	// OpenAPI number type
	TypeNumber Type = "NUMBER"
	// OpenAPI integer type
	TypeInteger Type = "INTEGER"
	// OpenAPI boolean type
	TypeBoolean Type = "BOOLEAN"
	// OpenAPI array type
	TypeArray Type = "ARRAY"
	// OpenAPI object type
	TypeObject Type = "OBJECT"
)

type UpdateCachedContentConfig added in v0.1.0

type UpdateCachedContentConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// The TTL for this resource. The expiration time is computed: now + TTL.
	TTL string `json:"ttl,omitempty"`
	// Timestamp of when this resource is considered expired.
	ExpireTime *time.Time `json:"expireTime,omitempty"`
}

Optional parameters for caches.update method.

type UpdateModelConfig added in v0.4.0

type UpdateModelConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`

	DisplayName string `json:"displayName,omitempty"`

	Description string `json:"description,omitempty"`
}

type UploadFileConfig

type UploadFileConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// The name of the file in the destination (e.g., 'files/sample-image').
	// If not provided, one will be generated.
	Name string `json:"name,omitempty"`
	// The MIME type of the file. If not provided, it will be inferred from the
	// file extension.
	MIMEType string `json:"mimeType,omitempty"`
	// Optional display name of the file.
	DisplayName string `json:"displayName,omitempty"`
}

Used to override the default configuration.

type UpscaleImageConfig

type UpscaleImageConfig struct {
	// Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
	// Whether to include a reason for filtered-out images in the
	// response.
	IncludeRAIReason bool `json:"includeRaiReason,omitempty"`
	// The image format that the output should be saved as.
	OutputMIMEType string `json:"outputMimeType,omitempty"`
	// The level of compression if the output_mime_type is image/jpeg. If nil, then API
	// will determine the default value.
	OutputCompressionQuality *int32 `json:"outputCompressionQuality,omitempty"`
}

Configuration for upscaling an image. For more information on this configuration, refer to the `Imagen API reference documentation <https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api>`_.

type UpscaleImageParameters

// UpscaleImageParameters are the user-facing parameters for upscaling an
// image.
type UpscaleImageParameters struct {
	// The model to use.
	Model string `json:"model,omitempty"`
	// The input image to upscale.
	Image *Image `json:"image,omitempty"`
	// The factor to upscale the image by (x2 or x4).
	UpscaleFactor string `json:"upscaleFactor,omitempty"`
	// Configuration for upscaling.
	Config *UpscaleImageConfig `json:"config,omitempty"`
}

User-facing parameters for upscaling an image.

type UpscaleImageResponse added in v0.4.0

// UpscaleImageResponse is the response to an image-upscaling request.
type UpscaleImageResponse struct {
	// Generated (upscaled) images.
	GeneratedImages []*GeneratedImage `json:"generatedImages,omitempty"`
}

type VertexAISearch

// VertexAISearch retrieves grounding data from a Vertex AI Search
// datastore. See https://cloud.google.com/products/agent-builder
type VertexAISearch struct {
	// Required. Fully-qualified Vertex AI Search data store resource ID. Format:
	// `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`
	Datastore string `json:"datastore,omitempty"`
}

Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/products/agent-builder

type VertexRAGStore

// VertexRAGStore retrieves grounding data from a Vertex RAG Store. You can
// find API default values and more details at
// https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/rag-api-v1#parameters-list
type VertexRAGStore struct {
	// Optional. Deprecated: use RAGResources instead.
	RAGCorpora []string `json:"ragCorpora,omitempty"`
	// Optional. The representation of the RAG source. It can be used to specify
	// a corpus only, or RAG files. Currently only one corpus, or multiple files
	// from a single corpus, are supported; multiple corpora may be supported in
	// the future.
	RAGResources []*VertexRAGStoreRAGResource `json:"ragResources,omitempty"`
	// Optional. Number of top-k results to return from the selected corpora.
	SimilarityTopK *int32 `json:"similarityTopK,omitempty"`
	// Optional. Only return results whose vector distance is smaller than the
	// threshold.
	VectorDistanceThreshold *float64 `json:"vectorDistanceThreshold,omitempty"`
}

Retrieve from Vertex RAG Store for grounding. You can find API default values and more details at https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/rag-api-v1#parameters-list

type VertexRAGStoreRAGResource

// VertexRAGStoreRAGResource is the definition of a RAG resource.
type VertexRAGStoreRAGResource struct {
	// Optional. RAG corpus resource name. Format:
	// `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`
	RAGCorpus string `json:"ragCorpus,omitempty"`
	// Optional. IDs of the RAG files. The files should belong to the same
	// corpus set in the RAGCorpus field.
	RAGFileIDs []string `json:"ragFileIds,omitempty"`
}

The definition of the RAG resource.

type VideoMetadata

// VideoMetadata describes the input video content.
type VideoMetadata struct {
	// Optional. The end offset of the video.
	EndOffset string `json:"endOffset,omitempty"`
	// Optional. The start offset of the video.
	StartOffset string `json:"startOffset,omitempty"`
}

Metadata that describes the input video content.

type VoiceConfig

// VoiceConfig is the configuration for the voice to use.
type VoiceConfig struct {
	// The configuration for the prebuilt speaker voice to use.
	PrebuiltVoiceConfig *PrebuiltVoiceConfig `json:"prebuiltVoiceConfig,omitempty"`
}

The configuration for the voice to use.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL