Skip to content

Commit 4cc583c

Browse files
Merge main into release
2 parents c77c9df + bc2b2cd commit 4cc583c

File tree

19 files changed

+187
-132
lines changed

19 files changed

+187
-132
lines changed

.changeset/four-vans-mix.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'@firebase/auth': minor
3+
---
4+
5+
Upgraded react-native-async-storage peerDependency to v2+.

.changeset/gentle-queens-argue.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'@firebase/ai': patch
3+
---
4+
5+
Updated the link from `/genai` to `/ailogic`.

.changeset/gorgeous-rice-carry.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'@firebase/ai': patch
3+
---
4+
5+
Fix `generateContentStream` returning wrong `inferenceSource`.

.changeset/thin-sheep-smoke.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'@firebase/firestore': patch
3+
---
4+
5+
Fix: Corrected misleading error message when doc() is called with undefined.

.github/PULL_REQUEST_TEMPLATE.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,6 @@ Before you file this pull request, please read these guidelines:
1414

1515
### API Changes
1616

17-
* At this time we cannot accept changes that affect the public API. If you'd like to help
18-
us make Firebase APIs better, please propose your change in an issue so that we
19-
can discuss it together.
17+
* Changes that affect the public API will require internal review. Before making a
18+
PR that changes the public API, we would suggest first proposing your change in an
19+
issue so that we can discuss it together.

.vscode/launch.json

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,8 @@
3737
"src/index.node.ts",
3838
"--timeout",
3939
"5000",
40-
"integration/**/*.test.ts"
40+
"integration/**/*.test.ts",
41+
"--grep", "${input:grepString}",
4142
],
4243
"env": {
4344
"TS_NODE_COMPILER_OPTIONS": "{\"module\":\"commonjs\"}"
@@ -184,5 +185,13 @@
184185
"cwd": "${workspaceRoot}/packages/firestore",
185186
"args": ["start", "--auto-watch", "--integration", "--browsers", "Chrome"]
186187
}
188+
],
189+
"inputs": [
190+
{
191+
"id": "grepString",
192+
"type": "promptString",
193+
"description": "Enter grep pattern (e.g., 'Google AI gemini-2.0-flash generateContent')",
194+
"default": ""
195+
}
187196
]
188197
}

packages/ai/integration/chat.test.ts

Lines changed: 15 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,10 @@ import {
2424
SafetySetting,
2525
getGenerativeModel
2626
} from '../src';
27-
import { testConfigs, TOKEN_COUNT_DELTA } from './constants';
27+
import { testConfigs } from './constants';
2828

29-
describe('Chat Session', () => {
29+
describe('Chat Session', function () {
30+
this.timeout(20_000);
3031
testConfigs.forEach(testConfig => {
3132
describe(`${testConfig.toString()}`, () => {
3233
const commonGenerationConfig: GenerationConfig = {
@@ -98,62 +99,21 @@ describe('Chat Session', () => {
9899

99100
if (model.model.includes('gemini-2.5-flash')) {
100101
// Token counts can vary slightly in chat context
101-
expect(response1.usageMetadata!.promptTokenCount).to.be.closeTo(
102-
17, // "What is the capital of France?" + system instruction
103-
TOKEN_COUNT_DELTA + 2 // More variance for chat context
104-
);
105-
expect(response1.usageMetadata!.candidatesTokenCount).to.be.closeTo(
106-
8, // "Paris"
107-
TOKEN_COUNT_DELTA
108-
);
109-
expect(response1.usageMetadata!.totalTokenCount).to.be.closeTo(
110-
49, // "What is the capital of France?" + system instruction + "Paris"
111-
TOKEN_COUNT_DELTA + 3 // More variance for chat context
112-
);
113-
expect(response1.usageMetadata!.totalTokenCount).to.be.closeTo(
114-
49, // "What is the capital of France?" + system instruction + "Paris"
115-
TOKEN_COUNT_DELTA + 3 // More variance for chat context
116-
);
117-
118-
expect(response2.usageMetadata!.promptTokenCount).to.be.closeTo(
119-
32, // History + "And what about Italy?" + system instruction
120-
TOKEN_COUNT_DELTA + 5 // More variance for chat context with history
121-
);
122-
expect(response2.usageMetadata!.candidatesTokenCount).to.be.closeTo(
123-
8,
124-
TOKEN_COUNT_DELTA
125-
);
126-
expect(response2.usageMetadata!.totalTokenCount).to.be.closeTo(
127-
68,
128-
TOKEN_COUNT_DELTA + 2
129-
);
102+
expect(response1.usageMetadata!.promptTokenCount).to.not.equal(0);
103+
expect(response1.usageMetadata!.candidatesTokenCount).to.not.equal(0);
104+
expect(response1.usageMetadata!.totalTokenCount).to.not.equal(0);
105+
expect(response2.usageMetadata!.promptTokenCount).to.not.equal(0);
106+
expect(response2.usageMetadata!.candidatesTokenCount).to.not.equal(0);
107+
expect(response2.usageMetadata!.totalTokenCount).to.not.equal(0);
130108
} else if (model.model.includes('gemini-2.0-flash')) {
131109
expect(response1.usageMetadata).to.not.be.null;
132110
// Token counts can vary slightly in chat context
133-
expect(response1.usageMetadata!.promptTokenCount).to.be.closeTo(
134-
15, // "What is the capital of France?" + system instruction
135-
TOKEN_COUNT_DELTA + 2 // More variance for chat context
136-
);
137-
expect(response1.usageMetadata!.candidatesTokenCount).to.be.closeTo(
138-
8, // "Paris"
139-
TOKEN_COUNT_DELTA
140-
);
141-
expect(response1.usageMetadata!.totalTokenCount).to.be.closeTo(
142-
23, // "What is the capital of France?" + system instruction + "Paris"
143-
TOKEN_COUNT_DELTA + 3 // More variance for chat context
144-
);
145-
expect(response2.usageMetadata!.promptTokenCount).to.be.closeTo(
146-
28, // History + "And what about Italy?" + system instruction
147-
TOKEN_COUNT_DELTA + 5 // More variance for chat context with history
148-
);
149-
expect(response2.usageMetadata!.candidatesTokenCount).to.be.closeTo(
150-
8,
151-
TOKEN_COUNT_DELTA
152-
);
153-
expect(response2.usageMetadata!.totalTokenCount).to.be.closeTo(
154-
36,
155-
TOKEN_COUNT_DELTA
156-
);
111+
expect(response1.usageMetadata!.promptTokenCount).to.not.equal(0);
112+
expect(response1.usageMetadata!.candidatesTokenCount).to.not.equal(0);
113+
expect(response1.usageMetadata!.totalTokenCount).to.not.equal(0);
114+
expect(response2.usageMetadata!.promptTokenCount).to.not.equal(0);
115+
expect(response2.usageMetadata!.candidatesTokenCount).to.not.equal(0);
116+
expect(response2.usageMetadata!.totalTokenCount).to.not.equal(0);
157117
}
158118
});
159119
});

packages/ai/integration/constants.ts

Lines changed: 25 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,12 +52,19 @@ const backendNames: Map<BackendType, string> = new Map([
5252
[BackendType.VERTEX_AI, 'Vertex AI']
5353
]);
5454

55-
const modelNames: readonly string[] = ['gemini-2.0-flash', 'gemini-2.5-flash'];
55+
const modelNames: readonly string[] = [
56+
'gemini-2.0-flash-001',
57+
'gemini-2.0-flash-lite-001',
58+
'gemini-2.5-flash',
59+
'gemini-2.5-flash-lite',
60+
'gemini-2.5-pro',
61+
'gemini-3-pro-preview'
62+
];
5663

5764
// The Live API requires a different set of models, and they're different for each backend.
5865
const liveModelNames: Map<BackendType, string[]> = new Map([
5966
[BackendType.GOOGLE_AI, ['gemini-live-2.5-flash-preview']],
60-
[BackendType.VERTEX_AI, ['gemini-2.0-flash-exp']]
67+
[BackendType.VERTEX_AI, ['gemini-2.0-flash-live-preview-04-09']]
6168
]);
6269

6370
/**
@@ -94,6 +101,22 @@ export const liveTestConfigs: readonly TestConfig[] = backends.flatMap(
94101
}
95102
);
96103

104+
/**
105+
* Test configurations used for server prompt templates integration tests.
106+
* Server prompt templates don't define the model name from the client, so these test configs
107+
* do not define a model string.
108+
* These tests should only run once per backend, rather than once per backend *per model*.
109+
*/
110+
export const promptTemplatesTestConfigs: readonly TestConfig[] =
111+
backends.flatMap(backend => {
112+
const ai = getAI(app, { backend });
113+
return {
114+
ai,
115+
model: '', // Unused by prompt templates tests
116+
toString: () => formatConfigAsString({ ai, model: '' }).trim()
117+
};
118+
});
119+
97120
export const TINY_IMG_BASE64 =
98121
'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej3aAAAAAXRSTlMAQObYZgAAAApJREFUCNdjYAAAAAIAAeIhvDMAAAAASUVORK5CYII=';
99122
export const IMAGE_MIME_TYPE = 'image/png';

packages/ai/integration/count-tokens.test.ts

Lines changed: 32 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -118,9 +118,18 @@ describe('Count Tokens', () => {
118118
};
119119
const response = await model.countTokens([imagePart]);
120120

121+
let expectedImageTokens: number;
122+
if (testConfig.model === 'gemini-3-pro-preview') {
123+
expectedImageTokens =
124+
testConfig.ai.backend.backendType === BackendType.GOOGLE_AI
125+
? 1089
126+
: 1120;
127+
} else {
128+
expectedImageTokens = 258;
129+
}
130+
121131
if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
122-
const expectedImageTokens = 259;
123-
expect(response.totalTokens).to.equal(expectedImageTokens);
132+
expect(response.totalTokens).to.equal(expectedImageTokens + 1); // There will be 1 unexpected text token
124133
expect(response.totalBillableCharacters).to.be.undefined; // Incorrect behavior
125134
expect(response.promptTokensDetails!.length).to.equal(2);
126135
expect(response.promptTokensDetails![0]).to.deep.equal({
@@ -129,19 +138,18 @@ describe('Count Tokens', () => {
129138
});
130139
expect(response.promptTokensDetails![1]).to.deep.equal({
131140
modality: Modality.IMAGE,
132-
tokenCount: 258
141+
tokenCount: expectedImageTokens
133142
});
134143
} else if (
135144
testConfig.ai.backend.backendType === BackendType.VERTEX_AI
136145
) {
137-
const expectedImageTokens = 258;
138146
expect(response.totalTokens).to.equal(expectedImageTokens);
139147
expect(response.totalBillableCharacters).to.be.undefined; // Incorrect behavior
140148
expect(response.promptTokensDetails!.length).to.equal(1);
141149
// Note: No text tokens are present for Vertex AI with image-only input.
142150
expect(response.promptTokensDetails![0]).to.deep.equal({
143151
modality: Modality.IMAGE,
144-
tokenCount: 258
152+
tokenCount: expectedImageTokens
145153
});
146154
expect(response.promptTokensDetails![0].tokenCount).to.equal(
147155
expectedImageTokens
@@ -220,13 +228,23 @@ describe('Count Tokens', () => {
220228
expect(response.promptTokensDetails).to.exist;
221229
expect(response.promptTokensDetails!.length).to.equal(3);
222230

231+
let expectedImageTokenCount;
232+
if (testConfig.model === 'gemini-3-pro-preview') {
233+
expectedImageTokenCount =
234+
testConfig.ai.backend.backendType === BackendType.GOOGLE_AI
235+
? 1089
236+
: 1120;
237+
} else {
238+
expectedImageTokenCount = 258;
239+
}
240+
223241
expect(imageDetails).to.deep.equal({
224242
modality: Modality.IMAGE,
225-
tokenCount: 258
243+
tokenCount: expectedImageTokenCount
226244
});
227245

228246
if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
229-
expect(response.totalTokens).to.equal(267);
247+
expect(response.totalTokens).to.equal(expectedImageTokenCount + 9);
230248
expect(response.totalBillableCharacters).to.be.undefined;
231249
expect(textDetails).to.deep.equal({
232250
modality: Modality.TEXT,
@@ -239,7 +257,7 @@ describe('Count Tokens', () => {
239257
} else if (
240258
testConfig.ai.backend.backendType === BackendType.VERTEX_AI
241259
) {
242-
expect(response.totalTokens).to.equal(261);
260+
expect(response.totalTokens).to.equal(expectedImageTokenCount + 3);
243261
expect(textDetails).to.deep.equal({
244262
modality: Modality.TEXT,
245263
tokenCount: 3
@@ -269,7 +287,12 @@ describe('Count Tokens', () => {
269287

270288
const response = await model.countTokens([filePart]);
271289

272-
const expectedFileTokens = 258;
290+
let expectedFileTokens: number;
291+
if (testConfig.model === 'gemini-3-pro-preview') {
292+
expectedFileTokens = 1120;
293+
} else {
294+
expectedFileTokens = 258;
295+
}
273296
expect(response.totalTokens).to.equal(expectedFileTokens);
274297
expect(response.totalBillableCharacters).to.be.undefined;
275298
expect(response.promptTokensDetails).to.exist;

0 commit comments

Comments
 (0)