Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/six-ghosts-beam.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
'@ai-sdk/google': patch
---

Make candidates optional
126 changes: 68 additions & 58 deletions packages/google/src/google-generative-ai-language-model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -150,11 +150,11 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
responseFormat?.type === 'json' ? 'application/json' : undefined,
responseSchema:
responseFormat?.type === 'json' &&
responseFormat.schema != null &&
// Google GenAI does not support all OpenAPI Schema features,
// so this is needed as an escape hatch:
// TODO convert into provider option
(googleOptions?.structuredOutputs ?? true)
responseFormat.schema != null &&
// Google GenAI does not support all OpenAPI Schema features,
// so this is needed as an escape hatch:
// TODO convert into provider option
(googleOptions?.structuredOutputs ?? true)
? convertJSONSchemaToOpenAPISchema(responseFormat.schema)
: undefined,
...(googleOptions?.audioTimestamp && {
Expand Down Expand Up @@ -210,13 +210,23 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
fetch: this.config.fetch,
});

const candidate = response.candidates[0];
const candidate = response.candidates?.[0];
const content: Array<LanguageModelV3Content> = [];

// map ordered parts to content:
const parts = candidate.content?.parts ?? [];
const parts = candidate?.content?.parts ?? [];

const usageMetadata = response.usageMetadata;
const warningsWithMissingCandidate: LanguageModelV3CallWarning[] =
candidate == null
? [
...warnings,
{
type: 'other',
message: 'No candidates returned from Google Generative AI.',
},
]
: warnings;

// Associates a code execution result with its preceding call.
let lastCodeExecutionToolCallId: string | undefined;
Expand Down Expand Up @@ -279,7 +289,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {

const sources =
extractSources({
groundingMetadata: candidate.groundingMetadata,
groundingMetadata: candidate?.groundingMetadata,
generateId: this.config.generateId,
}) ?? [];
for (const source of sources) {
Expand All @@ -289,7 +299,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
return {
content,
finishReason: mapGoogleGenerativeAIFinishReason({
finishReason: candidate.finishReason,
finishReason: candidate?.finishReason,
hasToolCalls: content.some(part => part.type === 'tool-call'),
}),
usage: {
Expand All @@ -299,13 +309,13 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
reasoningTokens: usageMetadata?.thoughtsTokenCount ?? undefined,
cachedInputTokens: usageMetadata?.cachedContentTokenCount ?? undefined,
},
warnings,
warnings: warningsWithMissingCandidate,
providerMetadata: {
google: {
promptFeedback: response.promptFeedback ?? null,
groundingMetadata: candidate.groundingMetadata ?? null,
urlContextMetadata: candidate.urlContextMetadata ?? null,
safetyRatings: candidate.safetyRatings ?? null,
groundingMetadata: candidate?.groundingMetadata ?? null,
urlContextMetadata: candidate?.urlContextMetadata ?? null,
safetyRatings: candidate?.safetyRatings ?? null,
usageMetadata: usageMetadata ?? null,
},
},
Expand Down Expand Up @@ -483,10 +493,10 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
id: currentReasoningBlockId,
providerMetadata: part.thoughtSignature
? {
google: {
thoughtSignature: part.thoughtSignature,
},
}
google: {
thoughtSignature: part.thoughtSignature,
},
}
: undefined,
});
}
Expand All @@ -497,8 +507,8 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
delta: part.text,
providerMetadata: part.thoughtSignature
? {
google: { thoughtSignature: part.thoughtSignature },
}
google: { thoughtSignature: part.thoughtSignature },
}
: undefined,
});
} else {
Expand All @@ -519,10 +529,10 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
id: currentTextBlockId,
providerMetadata: part.thoughtSignature
? {
google: {
thoughtSignature: part.thoughtSignature,
},
}
google: {
thoughtSignature: part.thoughtSignature,
},
}
: undefined,
});
}
Expand All @@ -533,8 +543,8 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
delta: part.text,
providerMetadata: part.thoughtSignature
? {
google: { thoughtSignature: part.thoughtSignature },
}
google: { thoughtSignature: part.thoughtSignature },
}
: undefined,
});
}
Expand Down Expand Up @@ -645,26 +655,27 @@ function getToolCallsFromParts({
parts: ContentSchema['parts'];
generateId: () => string;
}) {
const functionCallParts = parts?.filter(
part => 'functionCall' in part,
) as Array<
GoogleGenerativeAIContentPart & {
const functionCallParts = (parts ?? []).filter(
(
part,
): part is GoogleGenerativeAIContentPart & {
functionCall: { name: string; args: unknown };
thoughtSignature?: string | null;
}
>;
} =>
typeof part === 'object' && part != null && 'functionCall' in part,
);

return functionCallParts == null || functionCallParts.length === 0
? undefined
: functionCallParts.map(part => ({
type: 'tool-call' as const,
toolCallId: generateId(),
toolName: part.functionCall.name,
args: JSON.stringify(part.functionCall.args),
providerMetadata: part.thoughtSignature
? { google: { thoughtSignature: part.thoughtSignature } }
: undefined,
}));
type: 'tool-call' as const,
toolCallId: generateId(),
toolName: part.functionCall.name,
args: JSON.stringify(part.functionCall.args),
providerMetadata: part.thoughtSignature
? { google: { thoughtSignature: part.thoughtSignature } }
: undefined,
}));
}

function extractSources({
Expand Down Expand Up @@ -881,15 +892,17 @@ export const getUrlContextMetadataSchema = () =>
const responseSchema = lazySchema(() =>
zodSchema(
z.object({
candidates: z.array(
z.object({
content: getContentSchema().nullish().or(z.object({}).strict()),
finishReason: z.string().nullish(),
safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
groundingMetadata: getGroundingMetadataSchema().nullish(),
urlContextMetadata: getUrlContextMetadataSchema().nullish(),
}),
),
candidates: z
.array(
z.object({
content: getContentSchema().nullish().or(z.object({}).strict()),
finishReason: z.string().nullish(),
safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
groundingMetadata: getGroundingMetadataSchema().nullish(),
urlContextMetadata: getUrlContextMetadataSchema().nullish(),
}),
)
.nullish(),
usageMetadata: usageSchema.nullish(),
promptFeedback: z
.object({
Expand All @@ -901,24 +914,21 @@ const responseSchema = lazySchema(() =>
),
);

type ContentSchema = NonNullable<
InferSchema<typeof responseSchema>['candidates'][number]['content']
>;
type CandidatesSchema = InferSchema<typeof responseSchema>['candidates'];
type CandidateSchema = NonNullable<CandidatesSchema>[number];

type ContentSchema = NonNullable<CandidateSchema['content']>;
export type GroundingMetadataSchema = NonNullable<
InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']
CandidateSchema['groundingMetadata']
>;

type GroundingChunkSchema = NonNullable<
GroundingMetadataSchema['groundingChunks']
>[number];

export type UrlContextMetadataSchema = NonNullable<
InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']
>;
export type UrlContextMetadataSchema = NonNullable<CandidateSchema['urlContextMetadata']>;

export type SafetyRatingSchema = NonNullable<
InferSchema<typeof responseSchema>['candidates'][number]['safetyRatings']
>[number];
export type SafetyRatingSchema = NonNullable<CandidateSchema['safetyRatings']>[number];

// limited version of the schema, focused on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
Expand Down