Commit cfdb6a9

Fix flash image model and add remaining Imagen image editing snippets (#667)
* feat(ai): Fix flash image model and add remaining Imagen image editing snippets

  Region-Tag: android_gemini_developer_api_gemini_25_flash_image_model_java
  Region-Tag: android_imagen_model_configuration
  Region-Tag: android_imagen_vertex_model_configuration
  Region-Tag: android_imagen_generate_images
  Region-Tag: android_imagen_inpaint_insertion
  Region-Tag: android_imagen_inpaint_removal
  Region-Tag: android_imagen_editing_mask_editor
  Region-Tag: android_imagen_editing_create_mask
  Region-Tag: android_imagen_expand_image
  Region-Tag: android_imagen_replace_background
  Region-Tag: android_imagen_customize_subject
  Region-Tag: android_imagen_customize_control
  Region-Tag: android_imagen_customize_style

* Restoring public documentation snippets for illustration purposes

* Apply fixes to public documentation snippets

  This reverts commit a5837be.
1 parent afffc04 commit cfdb6a9

File tree: 2 files changed, +340 −4

misc/src/main/java/com/example/snippets/ai/GeminiDeveloperApiSnippetsJava.java

Lines changed: 10 additions & 4 deletions

@@ -34,9 +34,11 @@
 import com.google.firebase.ai.java.GenerativeModelFutures;
 import com.google.firebase.ai.type.Content;
 import com.google.firebase.ai.type.GenerateContentResponse;
+import com.google.firebase.ai.type.GenerationConfig;
 import com.google.firebase.ai.type.GenerativeBackend;
 import com.google.firebase.ai.type.ImagePart;
 import com.google.firebase.ai.type.Part;
+import com.google.firebase.ai.type.ResponseModality;

 import java.io.File;
 import java.io.IOException;
@@ -73,10 +75,14 @@ static final class Gemini25FlashImagePreviewModelConfigurationJava {

     static {
         // [START android_gemini_developer_api_gemini_25_flash_image_model_java]
-        GenerativeModel firebaseAI = FirebaseAI.getInstance(GenerativeBackend.googleAI())
-                .generativeModel("gemini-2.5-flash");
-
-        GenerativeModelFutures model = GenerativeModelFutures.from(firebaseAI);
+        GenerativeModel ai = FirebaseAI.getInstance(GenerativeBackend.googleAI()).generativeModel(
+                "gemini-2.5-flash-image-preview",
+                // Configure the model to respond with text and images (required)
+                new GenerationConfig.Builder()
+                        .setResponseModalities(Arrays.asList(ResponseModality.TEXT, ResponseModality.IMAGE))
+                        .build()
+        );
+        GenerativeModelFutures model = GenerativeModelFutures.from(ai);
         // [END android_gemini_developer_api_gemini_25_flash_image_model_java]
         Gemini25FlashImagePreviewModelConfigurationJava.model = model;
     }

misc/src/main/java/com/example/snippets/ai/ImagenSnippets.kt

Lines changed: 330 additions & 0 deletions
@@ -18,18 +18,67 @@

 package com.example.snippets.ai

+import android.graphics.Bitmap
+import android.graphics.Paint
+import androidx.compose.foundation.Canvas
+import androidx.compose.foundation.Image
+import androidx.compose.foundation.gestures.detectDragGestures
+import androidx.compose.foundation.layout.Box
+import androidx.compose.foundation.layout.Column
+import androidx.compose.foundation.layout.fillMaxSize
+import androidx.compose.foundation.layout.fillMaxWidth
+import androidx.compose.material3.Button
+import androidx.compose.material3.Text
+import androidx.compose.runtime.Composable
+import androidx.compose.runtime.getValue
+import androidx.compose.runtime.mutableFloatStateOf
+import androidx.compose.runtime.mutableStateListOf
+import androidx.compose.runtime.mutableStateOf
+import androidx.compose.runtime.remember
+import androidx.compose.runtime.setValue
+import androidx.compose.runtime.snapshots.SnapshotStateList
+import androidx.compose.ui.Modifier
+import androidx.compose.ui.geometry.Offset
+import androidx.compose.ui.graphics.Path
+import androidx.compose.ui.graphics.StrokeCap
+import androidx.compose.ui.graphics.StrokeJoin
+import androidx.compose.ui.graphics.asAndroidPath
+import androidx.compose.ui.graphics.asImageBitmap
+import androidx.compose.ui.graphics.drawscope.Stroke
+import androidx.compose.ui.graphics.drawscope.withTransform
+import androidx.compose.ui.input.pointer.pointerInput
+import androidx.compose.ui.layout.ContentScale
 import com.google.firebase.Firebase
+import com.google.firebase.ai.ImagenModel
 import com.google.firebase.ai.ai
+import com.google.firebase.ai.type.Dimensions
 import com.google.firebase.ai.type.GenerativeBackend
 import com.google.firebase.ai.type.ImagenAspectRatio
+import com.google.firebase.ai.type.ImagenBackgroundMask
+import com.google.firebase.ai.type.ImagenControlReference
+import com.google.firebase.ai.type.ImagenControlType
+import com.google.firebase.ai.type.ImagenEditingConfig
+import com.google.firebase.ai.type.ImagenEditMode
 import com.google.firebase.ai.type.ImagenGenerationConfig
+import com.google.firebase.ai.type.ImagenGenerationResponse
 import com.google.firebase.ai.type.ImagenImageFormat
+import com.google.firebase.ai.type.ImagenImagePlacement
+import com.google.firebase.ai.type.ImagenInlineImage
+import com.google.firebase.ai.type.ImagenMaskReference
 import com.google.firebase.ai.type.ImagenPersonFilterLevel
+import com.google.firebase.ai.type.ImagenRawImage
 import com.google.firebase.ai.type.ImagenSafetyFilterLevel
 import com.google.firebase.ai.type.ImagenSafetySettings
+import com.google.firebase.ai.type.ImagenStyleReference
+import com.google.firebase.ai.type.ImagenSubjectReference
+import com.google.firebase.ai.type.ImagenSubjectReferenceType
 import com.google.firebase.ai.type.PublicPreviewAPI
+import com.google.firebase.ai.type.toImagenInlineImage
 import kotlinx.coroutines.CoroutineScope
 import kotlinx.coroutines.launch
+import kotlin.math.min
+import android.graphics.Color as AndroidColor
+import androidx.compose.ui.graphics.Color as ComposeColor

 private object ImagenModelConfiguration {
     // [START android_imagen_model_configuration]
@@ -53,6 +102,13 @@ private object ImagenModelConfiguration {
     // [END android_imagen_model_configuration]
 }

+private object ImagenVertexAIModelConfiguration {
+    // [START android_imagen_vertex_model_configuration]
+    val imagenModel = Firebase.ai(backend = GenerativeBackend.vertexAI())
+        .imagenModel("imagen-3.0-capability-001")
+    // [END android_imagen_vertex_model_configuration]
+}
+
 private fun generateImagesWithImagen(scope: CoroutineScope) {
     val model = ImagenModelConfiguration.model
     scope.launch {
@@ -65,3 +121,277 @@ private fun generateImagesWithImagen(scope: CoroutineScope) {
         // [END android_imagen_generate_images]
     }
 }
+
+// [START android_imagen_inpaint_insertion]
+suspend fun insertFlowersIntoImage(
+    model: ImagenModel,
+    originalImage: Bitmap,
+    mask: ImagenMaskReference
+): ImagenGenerationResponse<ImagenInlineImage> {
+    val prompt = "a vase of flowers"
+
+    // Pass the original image, a mask, the prompt, and an editing configuration.
+    val editedImage = model.editImage(
+        referenceImages = listOf(
+            ImagenRawImage(originalImage.toImagenInlineImage()),
+            mask,
+        ),
+        prompt = prompt,
+        // Define the editing configuration for inpainting and insertion.
+        config = ImagenEditingConfig(ImagenEditMode.INPAINT_INSERTION)
+    )
+    return editedImage
+}
+// [END android_imagen_inpaint_insertion]
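An illustrative caller sketch, not part of this commit: it runs the insertion snippet from a coroutine and decodes the first returned image. It assumes the mask reference was built elsewhere (for example, from the mask editor further down) and relies on ImagenInlineImage.asBitmap() for decoding, as in the Firebase generation snippets.

private fun insertFlowersAndShow(
    scope: CoroutineScope,
    model: ImagenModel,
    originalImage: Bitmap,
    mask: ImagenMaskReference,
    onResult: (Bitmap) -> Unit,
) {
    scope.launch {
        // Run the suspending edit call and decode the first returned
        // inline image into a displayable Bitmap.
        val response = insertFlowersIntoImage(model, originalImage, mask)
        onResult(response.images.first().asBitmap())
    }
}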
+
+// [START android_imagen_inpaint_removal]
+suspend fun removeBallFromImage(
+    model: ImagenModel,
+    originalImage: Bitmap,
+    mask: ImagenMaskReference
+): ImagenGenerationResponse<ImagenInlineImage> {
+
+    // Optional: provide the prompt describing the content to be removed.
+    val prompt = "a ball"
+
+    // Pass the original image, a mask, the prompt, and an editing configuration.
+    val editedImage = model.editImage(
+        referenceImages = listOf(
+            ImagenRawImage(originalImage.toImagenInlineImage()),
+            mask
+        ),
+        prompt = prompt,
+        // Define the editing configuration for inpainting and removal.
+        config = ImagenEditingConfig(ImagenEditMode.INPAINT_REMOVAL)
+    )
+
+    return editedImage
+}
+// [END android_imagen_inpaint_removal]
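An illustrative variant, not part of this commit: removal does not always need a hand-drawn mask. This sketch assumes the SDK also offers an automatic ImagenForegroundMask type (a sibling of the ImagenBackgroundMask imported above, and an extra import); verify that name against the current com.google.firebase.ai.type surface before relying on it.

suspend fun removeForegroundSubject(
    model: ImagenModel,
    originalImage: Bitmap,
): ImagenGenerationResponse<ImagenInlineImage> {
    return model.editImage(
        referenceImages = listOf(
            ImagenRawImage(originalImage.toImagenInlineImage()),
            // Assumption: ImagenForegroundMask() auto-masks the main subject.
            ImagenForegroundMask(),
        ),
        prompt = "the main subject",
        config = ImagenEditingConfig(ImagenEditMode.INPAINT_REMOVAL)
    )
}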
+
+// [START android_imagen_editing_mask_editor]
+@Composable
+fun ImagenEditingMaskEditor(
+    sourceBitmap: Bitmap,
+    onMaskFinalized: (Bitmap) -> Unit,
+) {
+
+    val paths = remember { mutableStateListOf<Path>() }
+    var currentPath by remember { mutableStateOf<Path?>(null) }
+    var scale by remember { mutableFloatStateOf(1f) }
+    var offsetX by remember { mutableFloatStateOf(0f) }
+    var offsetY by remember { mutableFloatStateOf(0f) }
+
+    Column(
+        modifier = Modifier.fillMaxSize(),
+    ) {
+        Box(
+            modifier = Modifier
+                .fillMaxWidth()
+                .pointerInput(Unit) {
+                    detectDragGestures(
+                        onDragStart = { startOffset ->
+                            val transformedStart = Offset(
+                                (startOffset.x - offsetX) / scale,
+                                (startOffset.y - offsetY) / scale,
+                            )
+                            currentPath = Path().apply { moveTo(transformedStart.x, transformedStart.y) }
+                        },
+                        onDrag = { change, _ ->
+                            currentPath?.let {
+                                val transformedChange = Offset(
+                                    (change.position.x - offsetX) / scale,
+                                    (change.position.y - offsetY) / scale,
+                                )
+                                it.lineTo(transformedChange.x, transformedChange.y)
+                                currentPath = Path().apply { addPath(it) }
+                            }
+                            change.consume()
+                        },
+                        onDragEnd = {
+                            currentPath?.let { paths.add(it) }
+                            currentPath = null
+                        },
+                    )
+                },
+        ) {
+            Image(
+                bitmap = sourceBitmap.asImageBitmap(),
+                contentDescription = null,
+                modifier = Modifier.fillMaxSize(),
+                contentScale = ContentScale.Fit,
+            )
+            Canvas(modifier = Modifier.fillMaxSize()) {
+                val canvasWidth = size.width
+                val canvasHeight = size.height
+                val bitmapWidth = sourceBitmap.width.toFloat()
+                val bitmapHeight = sourceBitmap.height.toFloat()
+                scale = min(canvasWidth / bitmapWidth, canvasHeight / bitmapHeight)
+                offsetX = (canvasWidth - bitmapWidth * scale) / 2
+                offsetY = (canvasHeight - bitmapHeight * scale) / 2
+                withTransform(
+                    {
+                        translate(left = offsetX, top = offsetY)
+                        scale(scale, scale, pivot = Offset.Zero)
+                    },
+                ) {
+                    val strokeWidth = 70f / scale
+                    val stroke = Stroke(width = strokeWidth, cap = StrokeCap.Round, join = StrokeJoin.Round)
+                    val pathColor = ComposeColor.White.copy(alpha = 0.5f)
+                    paths.forEach { path ->
+                        drawPath(path = path, color = pathColor, style = stroke)
+                    }
+                    currentPath?.let { path ->
+                        drawPath(path = path, color = pathColor, style = stroke)
+                    }
+                }
+            }
+        }
+        Button(
+            onClick = {
+                val maskBitmap = createMaskBitmap(sourceBitmap, paths)
+                onMaskFinalized(maskBitmap)
+            },
+        ) {
+            Text("Save mask")
+        }
+    }
+}
+// [END android_imagen_editing_mask_editor]
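A hosting sketch, again illustrative rather than part of the commit: it keeps the finalized mask in Compose state so a later edit call can pick it up. Only names already imported by the snippet file are used.

@Composable
fun MaskEditingScreen(sourceBitmap: Bitmap) {
    var maskBitmap by remember { mutableStateOf<Bitmap?>(null) }
    val mask = maskBitmap
    if (mask == null) {
        // Let the user paint the region to edit.
        ImagenEditingMaskEditor(
            sourceBitmap = sourceBitmap,
            onMaskFinalized = { maskBitmap = it },
        )
    } else {
        // Preview the captured mask; a real app would wrap it as an
        // ImagenMaskReference (see the sketch after the next snippet)
        // and pass it to one of the editImage helpers above.
        Image(
            bitmap = mask.asImageBitmap(),
            contentDescription = "Mask preview",
        )
    }
}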
+
+// [START android_imagen_editing_create_mask]
+private fun createMaskBitmap(
+    sourceBitmap: Bitmap,
+    paths: SnapshotStateList<Path>,
+): Bitmap {
+    val maskBitmap = Bitmap.createBitmap(sourceBitmap.width, sourceBitmap.height, Bitmap.Config.ARGB_8888)
+    val canvas = android.graphics.Canvas(maskBitmap)
+    val paint = Paint().apply {
+        color = AndroidColor.RED
+        strokeWidth = 70f
+        style = Paint.Style.STROKE
+        strokeCap = Paint.Cap.ROUND
+        strokeJoin = Paint.Join.ROUND
+        isAntiAlias = true
+    }
+    paths.forEach { path -> canvas.drawPath(path.asAndroidPath(), paint) }
+
+    return maskBitmap
+}
+// [END android_imagen_editing_create_mask]
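Illustrative glue code, not part of the commit: the editing functions above take an ImagenMaskReference, so the bitmap produced by createMaskBitmap still needs wrapping. This assumes a raw-mask reference type named ImagenRawMask (an extra import) that accepts an ImagenInlineImage; check the exact name and constructor against the SDK.

private fun maskReferenceFrom(maskBitmap: Bitmap): ImagenMaskReference =
    // Assumption: ImagenRawMask wraps a user-supplied mask image.
    ImagenRawMask(maskBitmap.toImagenInlineImage())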
+
+// [START android_imagen_expand_image]
+suspend fun expandImage(originalImage: Bitmap, imagenModel: ImagenModel): ImagenGenerationResponse<ImagenInlineImage> {
+
+    // Optionally describe what should appear in the expanded area.
+    val prompt = "a sprawling sandy beach next to the ocean"
+
+    val editedImage = imagenModel.outpaintImage(
+        originalImage.toImagenInlineImage(),
+        Dimensions(1024, 1024),
+        prompt = prompt,
+        newPosition = ImagenImagePlacement.LEFT_CENTER
+    )
+
+    return editedImage
+}
+// [END android_imagen_expand_image]
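A usage sketch for outpainting, illustrative only: Dimensions(1024, 1024) is the size of the final canvas and newPosition anchors the original pixels on it, so LEFT_CENTER leaves the right-hand side to be synthesized.

private fun expandBeachPhoto(
    scope: CoroutineScope,
    model: ImagenModel,
    photo: Bitmap,
    onResult: (Bitmap) -> Unit,
) {
    scope.launch {
        val response = expandImage(photo, model)
        // The first returned image is the outpainted 1024x1024 result.
        onResult(response.images.first().asBitmap())
    }
}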
+
+// [START android_imagen_replace_background]
+suspend fun replaceBackground(model: ImagenModel, originalImage: Bitmap): ImagenGenerationResponse<ImagenInlineImage> {
+    // Provide the prompt describing the new background.
+    val prompt = "space background"
+
+    // Pass the original image, a mask, the prompt, and an editing configuration.
+    val editedImage = model.editImage(
+        referenceImages = listOf(
+            ImagenRawImage(originalImage.toImagenInlineImage()),
+            ImagenBackgroundMask(),
+        ),
+        prompt = prompt,
+        config = ImagenEditingConfig(ImagenEditMode.INPAINT_INSERTION)
+    )
+
+    return editedImage
+}
+// [END android_imagen_replace_background]
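Another illustrative caller, not part of the commit: with ImagenBackgroundMask the mask is produced automatically, so no hand-drawn mask is involved. The sketch reuses the Vertex AI capability model configured earlier in this file.

private fun replaceProductBackground(
    scope: CoroutineScope,
    productPhoto: Bitmap,
    onResult: (Bitmap) -> Unit,
) {
    scope.launch {
        val response = replaceBackground(
            ImagenVertexAIModelConfiguration.imagenModel,
            productPhoto,
        )
        onResult(response.images.first().asBitmap())
    }
}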
+
+// [START android_imagen_customize_subject]
+suspend fun customizeCatImage(model: ImagenModel, referenceCatImage: Bitmap): ImagenGenerationResponse<ImagenInlineImage> {
+
+    // Define the subject reference using the reference image.
+    val subjectReference = ImagenSubjectReference(
+        image = referenceCatImage.toImagenInlineImage(),
+        referenceId = 1,
+        description = "cat",
+        subjectType = ImagenSubjectReferenceType.ANIMAL
+    )
+
+    // Provide a prompt that describes the final image.
+    // The "[1]" links the prompt to the subject reference with ID 1.
+    val prompt = "A cat[1] flying through outer space"
+
+    // Use the editImage API to perform the subject customization.
+    val editedImage = model.editImage(
+        referenceImages = listOf(subjectReference),
+        prompt = prompt,
+        config = ImagenEditingConfig(
+            editSteps = 50 // Number of editing steps; a higher value can improve quality.
+        )
+    )
+
+    return editedImage
+}
+// [END android_imagen_customize_subject]
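An illustrative caller for subject customization, not part of the commit: the key mechanic is that referenceId = 1 matches the "[1]" token in the prompt, so the same pattern generalizes by changing the description and prompt together.

private fun putCatInSpace(
    scope: CoroutineScope,
    model: ImagenModel,
    catPhoto: Bitmap,
    onResult: (Bitmap) -> Unit,
) {
    scope.launch {
        val response = customizeCatImage(model, catPhoto)
        onResult(response.images.first().asBitmap())
    }
}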
+
+// [START android_imagen_customize_control]
+suspend fun customizeCatImageByControl(model: ImagenModel, referenceImage: Bitmap): ImagenGenerationResponse<ImagenInlineImage> {
+
+    // Define the control reference using the reference image.
+    val controlReference = ImagenControlReference(
+        image = referenceImage.toImagenInlineImage(),
+        referenceId = 1,
+        type = ImagenControlType.SCRIBBLE,
+    )
+
+    // The "[1]" links the prompt to the control reference with ID 1.
+    val prompt = "A cat flying through outer space arranged like the scribble map[1]"
+
+    val editedImage = model.editImage(
+        referenceImages = listOf(controlReference),
+        prompt = prompt,
+        config = ImagenEditingConfig(
+            editSteps = 50
+        ),
+    )
+
+    return editedImage
+}
+// [END android_imagen_customize_control]
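An illustrative variant, not part of the commit: control references can drive generation with inputs other than scribbles. This sketch assumes an ImagenControlType.CANNY constant for edge maps exists alongside SCRIBBLE; verify the available constants against the SDK before use.

suspend fun customizeByEdgeMap(
    model: ImagenModel,
    edgeMap: Bitmap,
): ImagenGenerationResponse<ImagenInlineImage> {
    val controlReference = ImagenControlReference(
        image = edgeMap.toImagenInlineImage(),
        referenceId = 1,
        // Assumption: CANNY conditions generation on an edge map.
        type = ImagenControlType.CANNY,
    )
    return model.editImage(
        referenceImages = listOf(controlReference),
        prompt = "A cat flying through outer space arranged like the edge map[1]",
        config = ImagenEditingConfig(editSteps = 50),
    )
}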
+
+// [START android_imagen_customize_style]
+suspend fun customizeImageByStyle(model: ImagenModel, referenceVanGoghImage: Bitmap): ImagenGenerationResponse<ImagenInlineImage> {
+
+    // Define the style reference using the reference image.
+    val styleReference = ImagenStyleReference(
+        image = referenceVanGoghImage.toImagenInlineImage(),
+        referenceId = 1,
+        description = "Van Gogh style"
+    )
+
+    // Provide a prompt that describes the final image.
+    // The "[1]" links the prompt to the style reference with ID 1.
+    val prompt = "A cat flying through outer space, in the Van Gogh style[1]"
+
+    // Use the editImage API to perform the style customization.
+    val editedImage = model.editImage(
+        referenceImages = listOf(styleReference),
+        prompt = prompt,
+        config = ImagenEditingConfig(
+            editSteps = 50 // Number of editing steps; a higher value can improve quality.
+        ),
+    )
+
+    return editedImage
+}
+// [END android_imagen_customize_style]
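A final illustrative caller, not part of the commit: editing calls go over the network and can fail or return no images, so this sketch handles both cases instead of assuming success.

private fun applyVanGoghStyle(
    scope: CoroutineScope,
    model: ImagenModel,
    styleImage: Bitmap,
    onResult: (Bitmap?) -> Unit,
) {
    scope.launch {
        try {
            val response = customizeImageByStyle(model, styleImage)
            // firstOrNull() guards against an empty result set.
            onResult(response.images.firstOrNull()?.asBitmap())
        } catch (e: Exception) {
            // Surface the failure to the UI in a real app.
            onResult(null)
        }
    }
}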
