@@ -57,6 +57,7 @@ import com.google.ai.sample.ApiProvider
5757import com.google.ai.edge.litertlm.Backend
5858import com.google.ai.edge.litertlm.Engine
5959import com.google.ai.edge.litertlm.EngineConfig
60+ import com.google.ai.edge.litertlm.NativeLibraryLoader
6061import com.google.mediapipe.tasks.genai.llminference.LlmInference
6162import okhttp3.MediaType.Companion.toMediaType
6263import okhttp3.OkHttpClient
@@ -86,6 +87,7 @@ class PhotoReasoningViewModel(
8687
8788 private var llmInference: LlmInference ? = null
8889 private var liteRtEngine: Engine ? = null
90+ private var liteRtNativeLoaded = false
8991 private val TAG = " PhotoReasoningViewModel"
9092
9193 // WebRTC & Signaling
@@ -337,6 +339,7 @@ class PhotoReasoningViewModel(
337339 if (! isLiteRtAbiSupported()) {
338340 return " Gemma 4 offline is only supported on arm64-v8a or x86_64 devices."
339341 }
342+ ensureLiteRtNativeLoaded()
340343 if (liteRtEngine == null ) {
341344 val liteRtBackend = if (backend == InferenceBackend .GPU ) Backend .GPU else Backend .CPU
342345 val engineConfig = EngineConfig (
@@ -388,6 +391,18 @@ class PhotoReasoningViewModel(
388391 }
389392 }
390393
/**
 * Loads the LiteRT-LM native code exactly once for this ViewModel.
 *
 * Primary path is the library's own [NativeLibraryLoader]; if that fails for
 * any reason, falls back to loading the "litertlm_jni" JNI library directly.
 * If the fallback also fails, its error propagates to the caller and
 * [liteRtNativeLoaded] stays false, so a later call will retry.
 */
private fun ensureLiteRtNativeLoaded() {
    if (liteRtNativeLoaded) return

    try {
        NativeLibraryLoader.INSTANCE.load()
    } catch (primaryFailure: Throwable) {
        // Best-effort fallback; an UnsatisfiedLinkError here propagates.
        System.loadLibrary("litertlm_jni")
    }

    liteRtNativeLoaded = true
}
405+
391406 private fun isLiteRtAbiSupported (): Boolean {
392407 val supportedAbis = Build .SUPPORTED_ABIS ?.toSet().orEmpty()
393408 return supportedAbis.contains(" arm64-v8a" ) || supportedAbis.contains(" x86_64" )
0 commit comments