Commit 6fb9a40

Restore Gemma4 backend choice and add init diagnostics
1 parent: c2ff8f4

1 file changed: 18 additions & 13 deletions

app/src/main/kotlin/com/google/ai/sample/feature/multimodal/PhotoReasoningViewModel.kt
@@ -338,20 +338,24 @@ class PhotoReasoningViewModel(
         if (!isLiteRtAbiSupported()) {
             return "Gemma 4 offline is only supported on arm64-v8a or x86_64 devices."
         }
+        Log.i(
+            TAG,
+            "Initializing Gemma 4 LiteRT engine. preferredBackend=$backend, " +
+                "abis=${Build.SUPPORTED_ABIS?.joinToString() ?: "unknown"}, " +
+                "modelPath=${modelFile.absolutePath}, modelSizeBytes=${modelFile.length()}"
+        )
         ensureLiteRtNativeLoaded()
         if (liteRtEngine == null) {
-            val liteRtBackend = Backend.CPU
-            if (backend == InferenceBackend.GPU) {
-                Log.w(
-                    TAG,
-                    "Gemma 4 offline currently forces CPU backend to avoid native crashes on GPU initialization."
-                )
-            }
+            val liteRtBackend = if (backend == InferenceBackend.GPU) Backend.GPU else Backend.CPU
             val engineConfig = EngineConfig(
                 modelPath = modelFile.absolutePath,
                 backend = liteRtBackend,
                 cacheDir = context.cacheDir.absolutePath
             )
+            Log.i(
+                TAG,
+                "Creating LiteRT engine with backend=$liteRtBackend cacheDir=${context.cacheDir.absolutePath}"
+            )
             liteRtEngine = Engine(engineConfig).also { it.initialize() }
             Log.d(TAG, "Offline model initialized with LiteRT-LM Engine backend=$liteRtBackend")
         }
@@ -381,6 +385,12 @@ class PhotoReasoningViewModel(
             return null // Already initialized or no model file
         } catch (e: Exception) {
             Log.e(TAG, "Failed to initialize offline model", e)
+            Log.e(
+                TAG,
+                "Offline init context: model=${com.google.ai.sample.GenerativeAiViewModelFactory.getCurrentModel()}, " +
+                    "preferredBackend=${GenerativeAiViewModelFactory.getBackend()}, " +
+                    "abis=${Build.SUPPORTED_ABIS?.joinToString() ?: "unknown"}"
+            )
             val msg = e.message ?: e.toString()
             if (msg.contains("nativeCheckLoaded", ignoreCase = true) ||
                 msg.contains("No implementation found", ignoreCase = true) ||
@@ -462,12 +472,7 @@ class PhotoReasoningViewModel(
     }
 
     private fun isOfflineGpuModelLoaded(): Boolean {
-        val currentModel = com.google.ai.sample.GenerativeAiViewModelFactory.getCurrentModel()
-        if (currentModel == ModelOption.GEMMA_4_E4B_IT) {
-            // Gemma 4 offline currently runs with CPU backend for stability.
-            return false
-        }
-        return currentModel.isOfflineModel &&
+        return com.google.ai.sample.GenerativeAiViewModelFactory.getCurrentModel().isOfflineModel &&
             com.google.ai.sample.GenerativeAiViewModelFactory.getBackend() == InferenceBackend.GPU &&
             (llmInference != null || liteRtEngine != null)
     }
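
The functional change is in the first hunk: instead of always constructing the engine with Backend.CPU and merely warning when GPU was requested, the code now maps the caller's InferenceBackend preference onto the LiteRT backend directly. Because the removed code forced CPU specifically to avoid native crashes during GPU initialization, a defensive variant would keep the preference but retry on CPU if GPU setup throws. The sketch below illustrates that pattern with the same Engine/EngineConfig/Backend calls used in the diff; the helper name and the fallback behaviour are assumptions for illustration, not part of this commit.

// Hypothetical helper, not part of this commit: honour the preferred backend,
// but fall back to CPU if GPU engine initialization throws. Engine, EngineConfig,
// Backend and InferenceBackend are the types already used in the diff above.
private fun createLiteRtEngine(
    modelPath: String,
    cacheDir: String,
    preferred: InferenceBackend
): Engine {
    val firstChoice = if (preferred == InferenceBackend.GPU) Backend.GPU else Backend.CPU
    return try {
        Engine(EngineConfig(modelPath = modelPath, backend = firstChoice, cacheDir = cacheDir))
            .also { it.initialize() }
    } catch (e: Exception) {
        if (firstChoice != Backend.GPU) throw e
        Log.w(TAG, "GPU engine initialization failed, retrying with CPU backend", e)
        Engine(EngineConfig(modelPath = modelPath, backend = Backend.CPU, cacheDir = cacheDir))
            .also { it.initialize() }
    }
}

If such a fallback were wanted, the call site in the first hunk could swap Engine(engineConfig).also { it.initialize() } for this helper without changing the surrounding null check or logging.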

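The ABI guard at the top of the first hunk, isLiteRtAbiSupported(), is referenced but not defined in this diff; only its error message names arm64-v8a and x86_64 as the supported ABIs. A minimal sketch of such a check, assuming it reads Build.SUPPORTED_ABIS the same way the new log lines do:

// Hypothetical sketch of the ABI gate referenced in the diff; the real
// isLiteRtAbiSupported() implementation is not shown in this commit.
private val liteRtSupportedAbis = setOf("arm64-v8a", "x86_64")

private fun isLiteRtAbiSupported(): Boolean =
    Build.SUPPORTED_ABIS?.any { it in liteRtSupportedAbis } == true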