diff --git a/src/modules/integrations.types.ts b/src/modules/integrations.types.ts
index 2138aea..c0a90f2 100644
--- a/src/modules/integrations.types.ts
+++ b/src/modules/integrations.types.ts
@@ -48,9 +48,9 @@ export interface InvokeLLMParams {
   prompt: string;
   /** Optionally specify a model to override the app-level model setting for this specific call.
    *
-   * Options: `"gpt_5_mini"`, `"gemini_3_flash"`, `"gpt_5"`, `"gemini_3_pro"`, `"claude_sonnet_4_6"`, `"claude_opus_4_6"`
+   * Options: `"gpt_5_mini"`, `"gemini_3_flash"`, `"gpt_5"`, `"gemini_3.1_pro"`, `"claude_sonnet_4_6"`, `"claude_opus_4_6"`
    */
-  model?: 'gpt_5_mini' | 'gemini_3_flash' | 'gpt_5' | 'gemini_3_pro' | 'claude_sonnet_4_6' | 'claude_opus_4_6';
+  model?: 'gpt_5_mini' | 'gemini_3_flash' | 'gpt_5' | 'gemini_3.1_pro' | 'claude_sonnet_4_6' | 'claude_opus_4_6';
   /** If set to `true`, the LLM will use Google Search, Maps, and News to gather real-time context before answering.
    * @default false
    */