Skip to content

Commit 61a97f5

Browse files
committed
Add Gemini Developer API code snippets
This commit introduces a collection of code snippets demonstrating the usage of the Gemini Developer API. It includes examples for model configuration, text-only and multimodal inputs (image, audio, and video), multi-turn chat, and image generation/editing. The necessary dependencies for Firebase AI, Guava, and Reactive Streams have been added to gradle/libs.versions.toml and misc/build.gradle.kts.

Region-Tags:
- android_gemini_developer_api_gemini_25_flash_model
- android_gemini_developer_api_gemini_25_flash_image_model
- android_gemini_developer_api_text_only_input
- android_gemini_developer_api_multimodal_input
- android_gemini_developer_api_multimodal_audio_input
- android_gemini_developer_api_multimodal_video_input
- android_gemini_developer_api_multiturn_chat
- android_gemini_developer_api_generate_image_from_text
- android_gemini_developer_api_edit_image
- android_gemini_developer_api_edit_image_chat
- android_gemini_developer_api_gemini_25_flash_model_java
- android_gemini_developer_api_gemini_25_flash_image_model_java
- android_gemini_developer_api_text_only_input_java
- android_gemini_developer_api_multimodal_input_java
- android_gemini_developer_api_multimodal_audio_input_java
- android_gemini_developer_api_multimodal_video_input_java
- android_gemini_developer_api_multiturn_chat_java
- android_gemini_developer_api_generate_image_from_text_java
- android_gemini_developer_api_edit_image_java
- android_gemini_developer_api_edit_image_chat_java
1 parent c4096d2 commit 61a97f5

File tree

5 files changed

+598
-0
lines changed

5 files changed

+598
-0
lines changed

gradle/libs.versions.toml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
12
[versions]
23
accompanist = "0.36.0"
34
activityKtx = "1.11.0"
@@ -40,10 +41,13 @@ compose-latest = "1.9.2"
4041
composeUiTooling = "1.5.2"
4142
coreSplashscreen = "1.0.1"
4243
coroutines = "1.10.2"
44+
firebase-bom = "34.3.0"
4345
glide = "1.0.0-beta08"
4446
google-maps = "19.2.0"
4547
gradle-versions = "0.53.0"
4648
guava = "33.5.0-jre"
49+
guava-android = "31.0.1-android"
50+
reactive-streams = "1.0.4"
4751
hilt = "2.57.1"
4852
horologist = "0.8.2-alpha"
4953
junit = "4.13.2"
@@ -173,11 +177,15 @@ appcompat = { module = "androidx.appcompat:appcompat", version.ref = "appcompat"
173177
coil-kt-compose = { module = "io.coil-kt:coil-compose", version.ref = "coil" }
174178
compose-foundation = { module = "androidx.wear.compose:compose-foundation", version.ref = "wearComposeFoundation" }
175179
compose-ui-tooling = { module = "androidx.wear.compose:compose-ui-tooling", version.ref = "composeUiTooling" }
180+
firebase-bom = { module = "com.google.firebase:firebase-bom", version.ref = "firebase-bom" }
181+
firebase-ai = { module = "com.google.firebase:firebase-ai" }
176182
glide-compose = { module = "com.github.bumptech.glide:compose", version.ref = "glide" }
177183
google-android-material = { module = "com.google.android.material:material", version.ref = "material" }
178184
googlemaps-compose = { module = "com.google.maps.android:maps-compose", version.ref = "maps-compose" }
179185
googlemaps-maps = { module = "com.google.android.gms:play-services-maps", version.ref = "google-maps" }
180186
guava = { module = "com.google.guava:guava", version.ref = "guava" }
187+
guava-android = { module = "com.google.guava:guava", version.ref = "guava-android" }
188+
reactive-streams = { module = "org.reactivestreams:reactive-streams", version.ref = "reactive-streams" }
181189
hilt-android = { module = "com.google.dagger:hilt-android", version.ref = "hilt" }
182190
hilt-compiler = { module = "com.google.dagger:hilt-android-compiler", version.ref = "hilt" }
183191
horologist-compose-layout = { module = "com.google.android.horologist:horologist-compose-layout", version.ref = "horologist" }

misc/build.gradle.kts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
12
plugins {
23
alias(libs.plugins.android.application)
34
alias(libs.plugins.kotlin.android)
@@ -72,6 +73,10 @@ dependencies {
7273
implementation(libs.androidx.startup.runtime)
7374
implementation(libs.androidx.window.java)
7475
implementation(libs.appcompat)
76+
implementation(platform(libs.firebase.bom))
77+
implementation(libs.firebase.ai)
78+
implementation(libs.guava.android)
79+
implementation(libs.reactive.streams)
7580
testImplementation(libs.junit)
7681
testImplementation(kotlin("test"))
7782
androidTestImplementation(libs.androidx.test.ext.junit)
Lines changed: 196 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,196 @@
1+
/*
2+
* Copyright 2025 The Android Open Source Project
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* https://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
package com.example.snippets.ai
18+
19+
import android.content.ContentResolver
20+
import android.graphics.Bitmap
21+
import android.net.Uri
22+
import com.google.firebase.Firebase
23+
import com.google.firebase.ai.ai
24+
import com.google.firebase.ai.type.GenerativeBackend
25+
import com.google.firebase.ai.type.ImagePart
26+
import com.google.firebase.ai.type.ResponseModality
27+
import com.google.firebase.ai.type.content
28+
import com.google.firebase.ai.type.generationConfig
29+
import kotlinx.coroutines.CoroutineScope
30+
import kotlinx.coroutines.launch
31+
32+
/**
 * Holds the shared model instance for the Gemini Developer API "gemini-2.5-flash"
 * text model. The code between the region tags is extracted verbatim into
 * published documentation — keep it minimal and self-contained.
 */
object GeminiDeveloperApi25FlashModelConfiguration {
    // [START android_gemini_developer_api_gemini_25_flash_model]
    // Start by instantiating a GenerativeModel and specifying the model name:
    val model = Firebase.ai(backend = GenerativeBackend.googleAI())
        .generativeModel("gemini-2.5-flash")
    // [END android_gemini_developer_api_gemini_25_flash_model]
}
39+
40+
/**
 * Holds the shared model instance for the "gemini-2.5-flash-image-preview" model,
 * configured to respond with both text and image modalities — the Firebase AI SDK
 * requires both to be listed for image generation/editing snippets below.
 */
object Gemini25FlashImagePreviewModelConfiguration {
    // [START android_gemini_developer_api_gemini_25_flash_image_model]
    val model = Firebase.ai(backend = GenerativeBackend.googleAI()).generativeModel(
        modelName = "gemini-2.5-flash-image-preview",
        // Configure the model to respond with text and images (required)
        generationConfig = generationConfig {
            responseModalities = listOf(
                ResponseModality.TEXT,
                ResponseModality.IMAGE
            )
        }
    )
    // [END android_gemini_developer_api_gemini_25_flash_image_model]
}
54+
55+
/**
 * Snippet: sends a text-only prompt to the Gemini 2.5 Flash model.
 *
 * The response is intentionally unused — the snippet only demonstrates the call.
 *
 * @param scope caller-owned [CoroutineScope] in which the request is launched.
 */
fun textOnlyInput(scope: CoroutineScope) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_text_only_input]
    scope.launch {
        val response = model.generateContent("Write a story about a magic backpack.")
    }
    // [END android_gemini_developer_api_text_only_input]
}
63+
64+
/**
 * Snippet: sends a multimodal (image + text) prompt to the Gemini 2.5 Flash model.
 *
 * @param scope caller-owned [CoroutineScope] in which the request is launched.
 * @param bitmap the image to include in the prompt.
 */
fun textAndImageInput(scope: CoroutineScope, bitmap: Bitmap) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_multimodal_input]
    scope.launch {
        val response = model.generateContent(
            content {
                image(bitmap)
                text("what is the object in the picture?")
            }
        )
    }
    // [END android_gemini_developer_api_multimodal_input]
}
77+
78+
/**
 * Snippet: sends an audio clip plus a text instruction to the Gemini 2.5 Flash model.
 *
 * Reads the whole audio stream into memory and attaches it as inline data with an
 * "audio/mpeg" MIME type. If the URI cannot be opened (null stream), the snippet
 * silently does nothing — acceptable for a demo, not for production code.
 *
 * @param scope caller-owned [CoroutineScope] in which the request is launched.
 * @param contentResolver used to open the audio content URI.
 * @param audioUri content URI of the audio recording to transcribe.
 */
fun textAndAudioInput(
    scope: CoroutineScope,
    contentResolver: ContentResolver,
    audioUri: Uri
) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_multimodal_audio_input]
    scope.launch {
        contentResolver.openInputStream(audioUri).use { stream ->
            stream?.let {
                val bytes = it.readBytes()

                val prompt = content {
                    inlineData(bytes, "audio/mpeg") // Specify the appropriate audio MIME type
                    text("Transcribe this audio recording.")
                }

                val response = model.generateContent(prompt)
            }
        }
    }
    // [END android_gemini_developer_api_multimodal_audio_input]
}
101+
102+
/**
 * Snippet: sends a video clip plus a text instruction to the Gemini 2.5 Flash model.
 *
 * Reads the whole video stream into memory and attaches it as inline data with a
 * "video/mp4" MIME type. If the URI cannot be opened (null stream), the snippet
 * silently does nothing — acceptable for a demo, not for production code.
 *
 * @param scope caller-owned [CoroutineScope] in which the request is launched.
 * @param contentResolver used to open the video content URI.
 * @param videoUri content URI of the video to describe.
 */
fun textAndVideoInput(
    scope: CoroutineScope,
    contentResolver: ContentResolver,
    videoUri: Uri
) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_multimodal_video_input]
    scope.launch {
        contentResolver.openInputStream(videoUri).use { stream ->
            stream?.let {
                val bytes = it.readBytes()

                val prompt = content {
                    inlineData(bytes, "video/mp4") // Specify the appropriate video MIME type
                    text("Describe the content of this video")
                }

                val response = model.generateContent(prompt)
            }
        }
    }
    // [END android_gemini_developer_api_multimodal_video_input]
}
125+
126+
/**
 * Snippet: starts a multi-turn chat with seeded user/model history, then sends a
 * follow-up message that relies on context from the seeded turns.
 *
 * @param scope caller-owned [CoroutineScope] in which the message send is launched.
 */
fun multiTurnChat(scope: CoroutineScope) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_multiturn_chat]
    val chat = model.startChat(
        history = listOf(
            content(role = "user") { text("Hello, I have 2 dogs in my house.") },
            content(role = "model") { text("Great to meet you. What would you like to know?") }
        )
    )

    scope.launch {
        val response = chat.sendMessage("How many paws are in my house?")
    }
    // [END android_gemini_developer_api_multiturn_chat]
}
141+
142+
/**
 * Snippet: generates an image from a text prompt using the image-preview model and
 * extracts the first returned [ImagePart] as a [Bitmap] (null if none returned).
 *
 * NOTE(review): the prompt says "bag pack" — presumably "backpack" was intended
 * (cf. the text-only snippet's "magic backpack"); confirm before publishing.
 *
 * @param scope caller-owned [CoroutineScope] in which the request is launched.
 */
fun generateImageFromText(scope: CoroutineScope) {
    val model = Gemini25FlashImagePreviewModelConfiguration.model
    // [START android_gemini_developer_api_generate_image_from_text]
    scope.launch {
        // Provide a text prompt instructing the model to generate an image
        val prompt =
            "A hyper realistic picture of a t-rex with a blue bag pack roaming a pre-historic forest."
        // To generate image output, call `generateContent` with the text input
        val generatedImageAsBitmap: Bitmap? = model.generateContent(prompt)
            .candidates.first().content.parts.filterIsInstance<ImagePart>()
            .firstOrNull()?.image
    }
    // [END android_gemini_developer_api_generate_image_from_text]
}
156+
157+
/**
 * Snippet: edits an existing image by sending it with a text instruction to the
 * image-preview model, extracting the first returned [ImagePart] as a [Bitmap]
 * (null if the response contains no image).
 *
 * @param scope caller-owned [CoroutineScope] in which the request is launched.
 * @param bitmap the source image to edit.
 */
fun editImage(scope: CoroutineScope, bitmap: Bitmap) {
    val model = Gemini25FlashImagePreviewModelConfiguration.model
    // [START android_gemini_developer_api_edit_image]
    scope.launch {
        // Provide a text prompt instructing the model to edit the image
        val prompt = content {
            image(bitmap)
            text("Edit this image to make it look like a cartoon")
        }
        // To edit the image, call `generateContent` with the prompt (image and text input)
        val generatedImageAsBitmap: Bitmap? = model.generateContent(prompt)
            .candidates.first().content.parts.filterIsInstance<ImagePart>().firstOrNull()?.image
        // Handle the generated text and image
    }
    // [END android_gemini_developer_api_edit_image]
}
173+
174+
/**
 * Snippet: iteratively edits an image over a chat session — the first turn sends
 * the image plus an instruction; the follow-up turn sends only text, relying on
 * the chat history to carry the image context. Each returned bitmap may be null
 * if the corresponding response contains no [ImagePart].
 *
 * @param scope caller-owned [CoroutineScope] in which the chat turns are launched.
 * @param bitmap the source image to edit on the first turn.
 */
fun editImageWithChat(scope: CoroutineScope, bitmap: Bitmap) {
    val model = Gemini25FlashImagePreviewModelConfiguration.model
    // [START android_gemini_developer_api_edit_image_chat]
    scope.launch {
        // Create the initial prompt instructing the model to edit the image
        val prompt = content {
            image(bitmap)
            text("Edit this image to make it look like a cartoon")
        }
        // Initialize the chat
        val chat = model.startChat()
        // To generate an initial response, send a user message with the image and text prompt
        var response = chat.sendMessage(prompt)
        // Inspect the returned image
        var generatedImageAsBitmap: Bitmap? = response
            .candidates.first().content.parts.filterIsInstance<ImagePart>().firstOrNull()?.image
        // Follow-up requests do not need to specify the image again
        response = chat.sendMessage("But make it old-school line drawing style")
        generatedImageAsBitmap = response
            .candidates.first().content.parts.filterIsInstance<ImagePart>().firstOrNull()?.image
    }
    // [END android_gemini_developer_api_edit_image_chat]
}

0 commit comments

Comments
 (0)