Skip to content

Commit 7f45bcf

Browse files
authored
Add Gemini Developer API code snippets (#649)
This commit introduces code snippets for the Gemini Developer API and the Imagen API, with examples in both Kotlin and Java. These snippets support the following documentation pages: https://developer.android.com/ai/gemini/developer-api and https://developer.android.com/ai/imagen
1 parent 6c67e34 commit 7f45bcf

File tree

8 files changed

+776
-4
lines changed

8 files changed

+776
-4
lines changed

build.gradle.kts

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,3 @@
1-
import org.codehaus.groovy.runtime.DefaultGroovyMethods.step
2-
import org.jetbrains.kotlin.gradle.internal.builtins.StandardNames.FqNames.target
3-
41
// Top-level build file where you can add configuration options common to all sub-projects/modules.
52
plugins {
63
alias(libs.plugins.gradle.versions)
@@ -94,7 +91,11 @@ allprojects {
9491
}
9592
format("xml") {
9693
target("**/*.xml")
97-
targetExclude("**/build/**/*.xml", "spotless/**/*.xml")
94+
targetExclude(
95+
"**/build/**/*.xml",
96+
"spotless/**/*.xml",
97+
".idea/**",
98+
)
9899
// Look for the root tag or a tag that is a snippet
99100
licenseHeaderFile(rootProject.file("spotless/copyright.xml"), "(<[a-zA-Z])|(<!--\\s+(//\\s*)?\\[START)").skipLinesMatching(".*START.*")
100101
}

gradle/libs.versions.toml

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,10 +40,13 @@ compose-latest = "1.9.3"
4040
composeUiTooling = "1.5.3"
4141
coreSplashscreen = "1.0.1"
4242
coroutines = "1.10.2"
43+
firebase-bom = "34.4.0"
4344
glide = "1.0.0-beta08"
4445
google-maps = "19.2.0"
4546
gradle-versions = "0.53.0"
4647
guava = "33.5.0-jre"
48+
guava-android = "31.0.1-android"
49+
reactive-streams = "1.0.4"
4750
hilt = "2.57.2"
4851
horologist = "0.8.2-alpha"
4952
junit = "4.13.2"
@@ -176,11 +179,15 @@ appcompat = { module = "androidx.appcompat:appcompat", version.ref = "appcompat"
176179
coil-kt-compose = { module = "io.coil-kt:coil-compose", version.ref = "coil" }
177180
compose-foundation = { module = "androidx.wear.compose:compose-foundation", version.ref = "wearComposeFoundation" }
178181
compose-ui-tooling = { module = "androidx.wear.compose:compose-ui-tooling", version.ref = "composeUiTooling" }
182+
firebase-bom = { module = "com.google.firebase:firebase-bom", version.ref = "firebase-bom" }
183+
firebase-ai = { module = "com.google.firebase:firebase-ai" }
179184
glide-compose = { module = "com.github.bumptech.glide:compose", version.ref = "glide" }
180185
google-android-material = { module = "com.google.android.material:material", version.ref = "material" }
181186
googlemaps-compose = { module = "com.google.maps.android:maps-compose", version.ref = "maps-compose" }
182187
googlemaps-maps = { module = "com.google.android.gms:play-services-maps", version.ref = "google-maps" }
183188
guava = { module = "com.google.guava:guava", version.ref = "guava" }
189+
guava-android = { module = "com.google.guava:guava", version.ref = "guava-android" }
190+
reactive-streams = { module = "org.reactivestreams:reactive-streams", version.ref = "reactive-streams" }
184191
hilt-android = { module = "com.google.dagger:hilt-android", version.ref = "hilt" }
185192
hilt-compiler = { module = "com.google.dagger:hilt-android-compiler", version.ref = "hilt" }
186193
horologist-compose-layout = { module = "com.google.android.horologist:horologist-compose-layout", version.ref = "horologist" }

misc/build.gradle.kts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
12
plugins {
23
alias(libs.plugins.android.application)
34
alias(libs.plugins.kotlin.android)
@@ -72,6 +73,10 @@ dependencies {
7273
implementation(libs.androidx.startup.runtime)
7374
implementation(libs.androidx.window.java)
7475
implementation(libs.appcompat)
76+
implementation(platform(libs.firebase.bom))
77+
implementation(libs.firebase.ai)
78+
implementation(libs.guava.android)
79+
implementation(libs.reactive.streams)
7580
testImplementation(libs.junit)
7681
testImplementation(kotlin("test"))
7782
androidTestImplementation(libs.androidx.test.ext.junit)
Lines changed: 204 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,204 @@
1+
/*
2+
* Copyright 2025 The Android Open Source Project
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* https://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
package com.example.snippets.ai
18+
19+
import android.content.ContentResolver
20+
import android.graphics.Bitmap
21+
import android.net.Uri
22+
import com.google.firebase.Firebase
23+
import com.google.firebase.ai.ai
24+
import com.google.firebase.ai.type.GenerativeBackend
25+
import com.google.firebase.ai.type.ImagePart
26+
import com.google.firebase.ai.type.ResponseModality
27+
import com.google.firebase.ai.type.content
28+
import com.google.firebase.ai.type.generationConfig
29+
import kotlinx.coroutines.CoroutineScope
30+
import kotlinx.coroutines.launch
31+
32+
/**
 * Holds the Gemini 2.5 Flash model instance shared by the text and
 * multimodal snippet functions in this file.
 */
object GeminiDeveloperApi25FlashModelConfiguration {
    // [START android_gemini_developer_api_gemini_25_flash_model]
    // Start by instantiating a GenerativeModel and specifying the model name:
    val model = Firebase.ai(backend = GenerativeBackend.googleAI())
        .generativeModel("gemini-2.5-flash")
    // [END android_gemini_developer_api_gemini_25_flash_model]
}
39+
40+
/**
 * Holds the Gemini 2.5 Flash image-preview model instance used by the
 * image-generation and image-editing snippet functions below.
 */
object Gemini25FlashImagePreviewModelConfiguration {
    // [START android_gemini_developer_api_gemini_25_flash_image_model]
    val model = Firebase.ai(backend = GenerativeBackend.googleAI()).generativeModel(
        modelName = "gemini-2.5-flash-image-preview",
        // Configure the model to respond with text and images (required)
        generationConfig = generationConfig {
            responseModalities = listOf(
                ResponseModality.TEXT,
                ResponseModality.IMAGE
            )
        }
    )
    // [END android_gemini_developer_api_gemini_25_flash_image_model]
}
54+
55+
/** Snippet: send a text-only prompt to the model and receive a generated response. */
@Suppress("unused")
fun textOnlyInput(scope: CoroutineScope) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_text_only_input]
    scope.launch {
        val response = model.generateContent("Write a story about a magic backpack.")
    }
    // [END android_gemini_developer_api_text_only_input]
}
64+
65+
/** Snippet: multimodal input combining an image ([bitmap]) with a text question. */
@Suppress("unused")
fun textAndImageInput(scope: CoroutineScope, bitmap: Bitmap) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_multimodal_input]
    scope.launch {
        val response = model.generateContent(
            content {
                image(bitmap)
                text("what is the object in the picture?")
            }
        )
    }
    // [END android_gemini_developer_api_multimodal_input]
}
79+
80+
/**
 * Snippet: multimodal audio input. Reads the audio bytes behind [audioUri]
 * via [contentResolver] and sends them inline together with a text prompt.
 */
@Suppress("unused")
fun textAndAudioInput(
    scope: CoroutineScope,
    contentResolver: ContentResolver,
    audioUri: Uri
) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_multimodal_audio_input]
    scope.launch {
        contentResolver.openInputStream(audioUri).use { stream ->
            stream?.let {
                val bytes = it.readBytes()

                val prompt = content {
                    inlineData(bytes, "audio/mpeg") // Specify the appropriate audio MIME type
                    text("Transcribe this audio recording.")
                }

                val response = model.generateContent(prompt)
            }
        }
    }
    // [END android_gemini_developer_api_multimodal_audio_input]
}
104+
105+
/**
 * Snippet: multimodal video input. Reads the video bytes behind [videoUri]
 * via [contentResolver] and sends them inline together with a text prompt.
 */
@Suppress("unused")
fun textAndVideoInput(
    scope: CoroutineScope,
    contentResolver: ContentResolver,
    videoUri: Uri
) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_multimodal_video_input]
    scope.launch {
        contentResolver.openInputStream(videoUri).use { stream ->
            stream?.let {
                val bytes = it.readBytes()

                val prompt = content {
                    inlineData(bytes, "video/mp4") // Specify the appropriate video MIME type
                    text("Describe the content of this video")
                }

                val response = model.generateContent(prompt)
            }
        }
    }
    // [END android_gemini_developer_api_multimodal_video_input]
}
129+
130+
/** Snippet: multi-turn chat — seed the conversation history, then send a follow-up message. */
@Suppress("unused")
fun multiTurnChat(scope: CoroutineScope) {
    val model = GeminiDeveloperApi25FlashModelConfiguration.model
    // [START android_gemini_developer_api_multiturn_chat]
    val chat = model.startChat(
        history = listOf(
            content(role = "user") { text("Hello, I have 2 dogs in my house.") },
            content(role = "model") { text("Great to meet you. What would you like to know?") }
        )
    )

    scope.launch {
        val response = chat.sendMessage("How many paws are in my house?")
    }
    // [END android_gemini_developer_api_multiturn_chat]
}
146+
147+
/**
 * Snippet: generate an image from a text prompt, extracting the first
 * [ImagePart] from the response candidates as a [Bitmap].
 */
@Suppress("unused")
fun generateImageFromText(scope: CoroutineScope) {
    val model = Gemini25FlashImagePreviewModelConfiguration.model
    // [START android_gemini_developer_api_generate_image_from_text]
    scope.launch {
        // Provide a text prompt instructing the model to generate an image
        val prompt =
            "A hyper realistic picture of a t-rex with a blue bag pack roaming a pre-historic forest."
        // To generate image output, call `generateContent` with the text input
        val generatedImageAsBitmap: Bitmap? = model.generateContent(prompt)
            .candidates.first().content.parts.filterIsInstance<ImagePart>()
            .firstOrNull()?.image
    }
    // [END android_gemini_developer_api_generate_image_from_text]
}
162+
163+
/**
 * Snippet: edit an existing image ([bitmap]) with a text instruction and
 * extract the edited result as a [Bitmap], if one was returned.
 */
@Suppress("unused")
fun editImage(scope: CoroutineScope, bitmap: Bitmap) {
    val model = Gemini25FlashImagePreviewModelConfiguration.model
    // [START android_gemini_developer_api_edit_image]
    scope.launch {
        // Provide a text prompt instructing the model to edit the image
        val prompt = content {
            image(bitmap)
            text("Edit this image to make it look like a cartoon")
        }
        // To edit the image, call `generateContent` with the prompt (image and text input)
        val generatedImageAsBitmap: Bitmap? = model.generateContent(prompt)
            .candidates.first().content.parts.filterIsInstance<ImagePart>().firstOrNull()?.image
        // Handle the generated text and image
    }
    // [END android_gemini_developer_api_edit_image]
}
180+
181+
/**
 * Snippet: iterative image editing over a chat session. The first turn sends
 * the image plus an instruction; later turns refine the result without
 * re-sending the image.
 */
@Suppress("unused")
fun editImageWithChat(scope: CoroutineScope, bitmap: Bitmap) {
    val model = Gemini25FlashImagePreviewModelConfiguration.model
    // [START android_gemini_developer_api_edit_image_chat]
    scope.launch {
        // Create the initial prompt instructing the model to edit the image
        val prompt = content {
            image(bitmap)
            text("Edit this image to make it look like a cartoon")
        }
        // Initialize the chat
        val chat = model.startChat()
        // To generate an initial response, send a user message with the image and text prompt
        var response = chat.sendMessage(prompt)
        // Inspect the returned image
        var generatedImageAsBitmap: Bitmap? = response
            .candidates.first().content.parts.filterIsInstance<ImagePart>().firstOrNull()?.image
        // Follow up requests do not need to specify the image again
        response = chat.sendMessage("But make it old-school line drawing style")
        generatedImageAsBitmap = response
            .candidates.first().content.parts.filterIsInstance<ImagePart>().firstOrNull()?.image
    }
    // [END android_gemini_developer_api_edit_image_chat]
}

0 commit comments

Comments (0)