diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 9e38ca2ae..5c43f08da 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -7,20 +7,20 @@ assignees: '' --- -**Describe the bug:** +**Describe the bug:** A clear and concise description of what the bug is. -**To Reproduce:** +**To Reproduce:** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error -**Expected behavior:** +**Expected behavior:** A clear and concise description of what you expected to happen. -**Screenshots:** +**Screenshots:** If applicable, add screenshots to help explain your problem. **Device & App Information (Please complete the following):** @@ -28,5 +28,5 @@ If applicable, add screenshots to help explain your problem. - Android Version: [e.g., Android 12, Android 13] - App Version: [e.g., 1.0.1, v1.0.2] -**Additional context:** +**Additional context:** Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index ae26d6f2b..041e344ee 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -7,14 +7,14 @@ assignees: '' --- -**Is your feature request related to a problem? Please describe.** +**Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -**Describe the solution you'd like** +**Describe the solution you'd like** A clear and concise description of what you want to happen. -**Describe alternatives you've considered** +**Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. -**Additional context** +**Additional context** Add any other context or screenshots about the feature request here. 
diff --git a/.idx/dev.nix b/.idx/dev.nix new file mode 100644 index 000000000..ab83c388c --- /dev/null +++ b/.idx/dev.nix @@ -0,0 +1,55 @@ +# To learn more about how to use Nix to configure your environment +# see: https://firebase.google.com/docs/studio/customize-workspace +{ pkgs, ... }: { + # Which nixpkgs channel to use. + channel = "stable-24.05"; # or "unstable" + + # Use https://search.nixos.org/packages to find packages + packages = [ + # pkgs.go + # pkgs.python311 + # pkgs.python311Packages.pip + # pkgs.nodejs_20 + # pkgs.nodePackages.nodemon + ]; + + # Sets environment variables in the workspace + env = {}; + idx = { + # Search for the extensions you want on https://open-vsx.org/ and use "publisher.id" + extensions = [ + # "vscodevim.vim" + ]; + + # Enable previews + previews = { + enable = true; + previews = { + # web = { + # # Example: run "npm run dev" with PORT set to IDX's defined port for previews, + # # and show it in IDX's web preview panel + # command = ["npm" "run" "dev"]; + # manager = "web"; + # env = { + # # Environment variables to set for your server + # PORT = "$PORT"; + # }; + # }; + }; + }; + + # Workspace lifecycle hooks + workspace = { + # Runs when a workspace is first created + onCreate = { + # Example: install JS dependencies from NPM + # npm-install = "npm install"; + }; + # Runs when the workspace is (re)started + onStart = { + # Example: start a background task to watch and re-build backend code + # watch-backend = "npm run watch-backend"; + }; + }; + }; +} diff --git a/Android/src/app/build.gradle.kts b/Android/src/app/build.gradle.kts index f5782c174..15cec312c 100644 --- a/Android/src/app/build.gradle.kts +++ b/Android/src/app/build.gradle.kts @@ -13,6 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +import org.jetbrains.kotlin.gradle.dsl.JvmTarget plugins { alias(libs.plugins.android.application) @@ -24,6 +25,7 @@ plugins { alias(libs.plugins.protobuf) alias(libs.plugins.hilt.application) alias(libs.plugins.oss.licenses) + // alias(libs.plugins.objectbox) kotlin("kapt") } @@ -33,10 +35,10 @@ android { defaultConfig { applicationId = "com.google.aiedge.gallery" - minSdk = 31 + minSdk = 28 targetSdk = 35 versionCode = 1 - versionName = "1.0.4" + versionName = "1.1.0" // Needed for HuggingFace auth workflows. // Use the scheme of the "Redirect URLs" in HuggingFace app. @@ -44,6 +46,7 @@ android { "REPLACE_WITH_YOUR_REDIRECT_SCHEME_IN_HUGGINGFACE_APP" testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner" + buildConfigField("boolean", "FIREBASE_ENABLED", "false") } buildTypes { @@ -54,12 +57,8 @@ android { } } compileOptions { - sourceCompatibility = JavaVersion.VERSION_11 - targetCompatibility = JavaVersion.VERSION_11 - } - kotlinOptions { - jvmTarget = "11" - freeCompilerArgs += "-Xcontext-receivers" + sourceCompatibility = JavaVersion.VERSION_21 + targetCompatibility = JavaVersion.VERSION_21 } buildFeatures { compose = true @@ -67,6 +66,14 @@ android { } } +kotlin { + compilerOptions { + jvmTarget = JvmTarget.JVM_21 + freeCompilerArgs.add("-Xcontext-receivers") + } + jvmToolchain(21) +} + dependencies { implementation(libs.androidx.core.ktx) implementation(libs.androidx.lifecycle.runtime.ktx) @@ -103,7 +110,13 @@ dependencies { implementation(libs.play.services.oss.licenses) implementation(platform(libs.firebase.bom)) implementation(libs.firebase.analytics) + implementation(libs.play.services.nearby) + implementation(libs.objectbox.android) + implementation(libs.objectbox.kotlin) + // implementation(libs.objectbox.java) + // implementation(libs.objectbox.vector) kapt(libs.hilt.android.compiler) + kapt(libs.objectbox.processor) testImplementation(libs.junit) androidTestImplementation(libs.androidx.junit) 
androidTestImplementation(libs.androidx.espresso.core) @@ -116,5 +129,5 @@ dependencies { protobuf { protoc { artifact = "com.google.protobuf:protoc:4.26.1" } - generateProtoTasks { all().forEach { it.plugins { create("java") { option("lite") } } } } + generateProtoTasks { all().forEach { it.builtins { create("java") { option("lite") } } } } } diff --git a/Android/src/app/objectbox-models/default.json b/Android/src/app/objectbox-models/default.json new file mode 100644 index 000000000..aa883180a --- /dev/null +++ b/Android/src/app/objectbox-models/default.json @@ -0,0 +1,73 @@ +{ + "_note1": "KEEP THIS FILE! Check it into a version control system (VCS) like git.", + "_note2": "ObjectBox manages crucial IDs for your object model. See docs for details.", + "_note3": "If you have VCS merge conflicts, you must resolve them according to ObjectBox docs.", + "entities": [ + { + "id": "1:8904987025762660639", + "lastPropertyId": "4:7476748590421088182", + "name": "Mission", + "properties": [ + { + "id": "1:4466308653343913902", + "name": "id", + "type": 6, + "flags": 1 + }, + { + "id": "2:5213058753200353166", + "name": "agentName", + "type": 9 + }, + { + "id": "3:8115710319763687221", + "name": "description", + "type": 9 + }, + { + "id": "4:7476748590421088182", + "name": "embedding", + "indexId": "1:6591355027232620496", + "type": 28, + "flags": 8 + } + ], + "relations": [] + }, + { + "id": "2:5851076489166036791", + "lastPropertyId": "3:5886129787010143938", + "name": "SystemPrompt", + "properties": [ + { + "id": "1:901480344699551907", + "name": "id", + "type": 6, + "flags": 1 + }, + { + "id": "2:4923962571859088191", + "name": "role", + "type": 9 + }, + { + "id": "3:5886129787010143938", + "name": "prompt", + "type": 9 + } + ], + "relations": [] + } + ], + "lastEntityId": "2:5851076489166036791", + "lastIndexId": "1:6591355027232620496", + "lastRelationId": "0:0", + "lastSequenceId": "0:0", + "modelVersion": 5, + "modelVersionParserMinimum": 5, + "retiredEntityUids": 
[], + "retiredIndexUids": [], + "retiredPropertyUids": [], + "retiredRelationUids": [], + "version": 1 +} \ No newline at end of file diff --git a/Android/src/app/src/main/AndroidManifest.xml b/Android/src/app/src/main/AndroidManifest.xml index ca65242d5..6c1042f65 100644 --- a/Android/src/app/src/main/AndroidManifest.xml +++ b/Android/src/app/src/main/AndroidManifest.xml @@ -16,11 +16,10 @@ --> @@ -31,7 +30,34 @@ + + + + + + + + + + + + + + + + + + + + - Google AI Edge Gallery + Distributed Edge Agents Model Manager %1$s downloaded Cancel @@ -31,7 +31,7 @@ Type message… You LLM - Model + Agent Result Model not downloaded yet Initializing model… @@ -39,7 +39,7 @@ Type movie review to classify… Type prompt… Type prompt… - Google AI Edge Gallery App + Distributed Edge Agents App Terms of Service View the full Terms of Service Accept and Continue diff --git a/Android/src/gradle.properties b/Android/src/gradle.properties index 20e2a0152..ad73cf6cf 100644 --- a/Android/src/gradle.properties +++ b/Android/src/gradle.properties @@ -20,4 +20,6 @@ kotlin.code.style=official # Enables namespacing of each library's R class so that its R class includes only the # resources declared in the library itself and none from the library's dependencies, # thereby reducing the size of the R class for that library -android.nonTransitiveRClass=true \ No newline at end of file +android.nonTransitiveRClass=true +# (kotlin 2.0) +kapt.use.k2=true diff --git a/Android/src/gradle/libs.versions.toml b/Android/src/gradle/libs.versions.toml index 19d5d3f24..22678efb2 100644 --- a/Android/src/gradle/libs.versions.toml +++ b/Android/src/gradle/libs.versions.toml @@ -1,40 +1,42 @@ [versions] -agp = "8.8.2" -kotlin = "2.1.0" -coreKtx = "1.15.0" +agp = "8.12.0" +kotlin = "2.2.0" +coreKtx = "1.16.0" junit = "4.13.2" -junitVersion = "1.2.1" -espressoCore = "3.6.1" -lifecycleRuntimeKtx = "2.8.7" +junitVersion = "1.3.0" +espressoCore = "3.7.0" +lifecycleRuntimeKtx = "2.9.2" activityCompose = "1.10.1" 
-composeBom = "2025.05.00" -navigation = "2.8.9" -serializationPlugin = "2.0.21" -serializationJson = "1.7.3" +composeBom = "2025.07.00" +navigation = "2.9.3" +serializationPlugin = "2.2.0" +serializationJson = "1.9.0" materialIconExtended = "1.7.8" -workRuntime = "2.10.0" +workRuntime = "2.10.3" dataStore = "1.1.7" -gson = "2.12.1" -lifecycleProcess = "2.8.7" +gson = "2.13.1" +lifecycleProcess = "2.9.2" protobuf = "0.9.5" -protobufJavaLite = "4.26.1" +protobufJavaLite = "4.31.1" #noinspection GradleDependency -mediapipeTasksText = "0.10.21" +mediapipeTasksText = "0.10.26" mediapipeTasksGenai = "0.10.25" -mediapipeTasksImageGenerator = "0.10.21" -commonmark = "1.0.0-alpha02" -richtext = "1.0.0-alpha02" +mediapipeTasksImageGenerator = "0.10.26.1" +commonmark = "1.0.0-alpha03" +richtext = "1.0.0-alpha03" playServicesTfliteJava = "16.4.0" playServicesTfliteGpu= "16.4.0" cameraX = "1.4.2" netOpenidAppauth = "0.11.1" -splashscreen = "1.2.0-beta01" -hilt = "2.56.2" +splashscreen = "1.2.0-rc01" +hilt = "2.57" hiltNavigation = "1.2.0" ossLicenses = "0.10.6" -playServicesOssLicenses = "17.1.0" +playServicesOssLicenses = "17.2.1" googleService = "4.4.3" -firebaseBom = "33.16.0" +firebaseBom = "34.0.0" +playServicesNearby = "19.3.0" +objectbox = "4.3.0" [libraries] androidx-core-ktx = { group = "androidx.core", name = "core-ktx", version.ref = "coreKtx" } @@ -82,6 +84,11 @@ firebase-bom = { group = "com.google.firebase", name = "firebase-bom", version.r # When using the Firebase BoM, you don't specify versions in Firebase # library dependencies. 
firebase-analytics = { group = "com.google.firebase", name = "firebase-analytics" } +play-services-nearby = { module = "com.google.android.gms:play-services-nearby", version.ref = "playServicesNearby" } +objectbox-android = { module = "io.objectbox:objectbox-android", version.ref = "objectbox" } +objectbox-kotlin = { module = "io.objectbox:objectbox-kotlin", version.ref = "objectbox" } +# objectbox-java = { module = "io.objectbox:objectbox-java", version.ref = "objectbox" } +objectbox-processor = { module = "io.objectbox:objectbox-processor", version.ref = "objectbox" } [plugins] android-application = { id = "com.android.application", version.ref = "agp" } @@ -91,4 +98,5 @@ kotlin-serialization = { id = "org.jetbrains.kotlin.plugin.serialization", versi protobuf = {id = "com.google.protobuf", version.ref = "protobuf"} hilt-application = { id = "com.google.dagger.hilt.android", version.ref = "hilt" } oss-licenses = {id = "com.google.android.gms.oss-licenses-plugin", version.ref = "ossLicenses"} -google-services = { id = "com.google.gms.google-services", version.ref = "googleService" } \ No newline at end of file +google-services = { id = "com.google.gms.google-services", version.ref = "googleService" } +objectbox = { id = "io.objectbox", version.ref = "objectbox" } diff --git a/Android/src/gradle/wrapper/gradle-wrapper.properties b/Android/src/gradle/wrapper/gradle-wrapper.properties index a1d407ed7..f5bdff3b9 100644 --- a/Android/src/gradle/wrapper/gradle-wrapper.properties +++ b/Android/src/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ #Sun Mar 02 09:29:13 PST 2025 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.13-bin.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/Android/src/settings.gradle.kts b/Android/src/settings.gradle.kts index e10f0eee8..d71ee2952 
100644 --- a/Android/src/settings.gradle.kts +++ b/Android/src/settings.gradle.kts @@ -31,6 +31,9 @@ pluginManagement { if (requested.id.id == "com.google.android.gms.oss-licenses-plugin") { useModule("com.google.android.gms:oss-licenses-plugin:0.10.6") } + if (requested.id.id == "io.objectbox") { + useModule("io.objectbox:objectbox-gradle-plugin:${requested.version}") + } } } } @@ -44,6 +47,6 @@ dependencyResolutionManagement { } } -rootProject.name = "AI Edge Gallery" +rootProject.name = "Distributed Edge Agents" include(":app") diff --git a/README.md b/README.md index 2c58b3fb7..1cc06b978 100644 --- a/README.md +++ b/README.md @@ -1,61 +1,74 @@ -# Google AI Edge Gallery ✨ +# Stigmergy ODEA (Open Distributed Edge Agents) -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) -[![GitHub release (latest by date)](https://img.shields.io/github/v/release/google-ai-edge/gallery)](https://github.com/google-ai-edge/gallery/releases) +## Overview -**Explore, Experience, and Evaluate the Future of On-Device Generative AI with Google AI Edge.** +Stigmergy ODEA is a decentralized, agentic system designed for high-stakes tactical operations on edge devices. It leverages the multi-modal capabilities of Google's groundbreaking Gemma 3n model to create a collective intelligence highly resistant to malfunctions and adversarial attacks. -The Google AI Edge Gallery is an experimental app that puts the power of cutting-edge Generative AI models directly into your hands, running entirely on your Android *(available now)* and iOS *(coming soon)* devices. Dive into a world of creative and practical AI use cases, all running locally, without needing an internet connection once the model is loaded. Experiment with different models, chat, ask questions with images, explore prompts, and more! 
+Deployed on a fleet of tiny Android devices (e.g., drones), the system operates as a hierarchical team with a designated **Commander** and multiple **Subordinates**. Each unit runs its own local agent, participating in a group chat to collaboratively perceive, plan, and act. This architecture is built for strategic decision-making and guidance in environments where real-time, centralized control is infeasible or too risky. -**Overview** -Overview +The agents communicate via **Android Nearby Connections**, forming a resilient mesh network that can adapt to changing conditions on the fly. -**Ask Image** -Ask Image +## Core Concepts -**Prompt Lab** -Prompt Lab +* **Collective Intelligence:** The system's strength lies in the emergent intelligence of the group. By sharing observations and intent through stigmergic communication, the agents can uncover hidden threats, adapt to environmental changes, and maintain mission alignment even when individual units are compromised. +* **Decentralized Agency:** Each drone is equipped with a powerful local agent, allowing for autonomous operation and reducing reliance on a single point of failure. +* **Commander/Subordinate Hierarchy:** The Commander provides strategic direction, issuing broadcast commands and requesting status reports to maintain operational tempo. However, the system is designed to survive the loss of a commander, with protocols in place to elect a new one. -**AI Chat** -AI Chat +## Key Features -## ✨ Core Features +* **High Resilience & Fault Tolerance:** + * **Commander Loss:** The agent swarm can detect the loss of a commander and dynamically appoint a new one, ensuring mission continuity. + * **Decentralized Operation:** The system avoids single points of failure, as intelligence is distributed across all agents. +* **Advanced Security:** + * **Prompt Injection Resistance:** The agents' intelligence and mission context help them identify and resist malicious prompts intended to derail their objectives. 
+ * **Impersonation Defense:** The system is designed to detect and reject commands from unauthorized or impersonated devices. +* **Multi-Modal Perception:** + * Leveraging the **Gemma 3n** model, agents can process both text and visual data to achieve a deeper understanding of their environment. This allows them to recognize discrepancies between expected and perceived mission targets, flagging potential issues or threats. +* **Edge-Optimized:** + * Designed to run on resource-constrained tiny Android devices with processing power significantly less than a Jetson Orin Nano. + * Communication is handled efficiently through the low-power, high-bandwidth capabilities of Android Nearby Connections. -* **📱 Run Locally, Fully Offline:** Experience the magic of GenAI without an internet connection. All processing happens directly on your device. -* **🤖 Choose Your Model:** Easily switch between different models from Hugging Face and compare their performance. -* **🖼️ Ask Image:** Upload an image and ask questions about it. Get descriptions, solve problems, or identify objects. -* **✍️ Prompt Lab:** Summarize, rewrite, generate code, or use freeform prompts to explore single-turn LLM use cases. -* **💬 AI Chat:** Engage in multi-turn conversations. -* **📊 Performance Insights:** Real-time benchmarks (TTFT, decode speed, latency). -* **🧩 Bring Your Own Model:** Test your local LiteRT `.task` models. -* **🔗 Developer Resources:** Quick links to model cards and source code. +## Use Cases -## 🏁 Get Started in Minutes! +The primary use case for Stigmergy ODEA is to support **first responders** in assessing and navigating post-catastrophe environments. Deploying a swarm of ODEA-enabled drones can provide critical situational awareness after events like: -1. **Download the App:** Grab the [**latest APK**](https://github.com/google-ai-edge/gallery/releases/latest/download/ai-edge-gallery.apk). -2. 
**Install & Explore:** For detailed installation instructions (including for corporate devices) and a full user guide, head over to our [**Project Wiki**](https://github.com/google-ai-edge/gallery/wiki)! +* Earthquakes +* Forest Fires +* Floods and Hurricanes -## 🛠️ Technology Highlights +The collective intelligence can map disaster areas, identify survivors, and detect ongoing hazards, all while resisting the chaotic and unpredictable conditions of the environment. -* **Google AI Edge:** Core APIs and tools for on-device ML. -* **LiteRT:** Lightweight runtime for optimized model execution. -* **LLM Inference API:** Powering on-device Large Language Models. -* **Hugging Face Integration:** For model discovery and download. +## Technology Stack -## 🤝 Feedback +* **Generative AI:** Google Gemma 3n +* **Platform:** Android +* **Communication:** Android Nearby Connections +* **Architecture:** Decentralized Multi-Agent System -This is an **experimental Alpha release**, and your input is crucial! +## Gemma 3n Challenge -* 🐞 **Found a bug?** [Report it here!](https://github.com/google-ai-edge/gallery/issues/new?assignees=&labels=bug&template=bug_report.md&title=%5BBUG%5D) -* 💡 **Have an idea?** [Suggest a feature!](https://github.com/google-ai-edge/gallery/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=%5BFEATURE%5D) +This project was submitted to the [Gemma 3n Challenge on Kaggle](https://www.kaggle.com/competitions/google-gemma-3n-hackathon/). -## 📄 License +## Future Plans -Licensed under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for details. +Our roadmap includes the following feature additions: -## 🔗 Useful Links +* **Introduce Koog for Full Agentic Capabilities:** Integrate the Koog framework to enable more complex, goal-oriented agentic behaviors. 
+* **Explore Conversational Consensus Algorithms:** Research and implement algorithms that allow agents to reach a consensus through dialogue, improving collective decision-making. +* **Leverage Vector Store for Mission Detail Retrieval:** Utilize a vector store for efficient retrieval of mission-critical information, enhancing agent knowledge and responsiveness. +* **Log Interactions on a Crypto Ledger:** Implement a secure, immutable ledger for all agent interactions, providing a transparent and tamper-proof audit trail. +* **Add Prompt Guard:** Integrate a prompt guarding mechanism, such as Llama Guard 2, to further enhance security against adversarial attacks. -* [**Project Wiki (Detailed Guides)**](https://github.com/google-ai-edge/gallery/wiki) -* [Hugging Face LiteRT Community](https://huggingface.co/litert-community) -* [LLM Inference guide for Android](https://ai.google.dev/edge/mediapipe/solutions/genai/llm_inference/android) -* [Google AI Edge Documentation](https://ai.google.dev/edge) +For more details, please see our [GitHub Issues](https://github.com/Open-Distributed-Edge-Agents/EdgeGenAI/issues). + +## Credits + +This project is a fork of the official [Google AI Edge Gallery](https://github.com/google-ai-edge/gallery) demo app and builds upon its foundation. We are grateful to the original authors for their work. + +## Contributing + +We welcome contributions! Please see our [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to get started. + +## License + +This project is licensed under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for details. 
diff --git a/model_allowlist.json b/model_allowlist.json index 4d205f17a..a55e4cc62 100644 --- a/model_allowlist.json +++ b/model_allowlist.json @@ -4,7 +4,7 @@ "name": "Gemma-3n-E2B-it-int4", "modelId": "google/gemma-3n-E2B-it-litert-preview", "modelFile": "gemma-3n-E2B-it-int4.task", - "description": "Preview version of [Gemma 3n E2B](https://ai.google.dev/gemma/docs/gemma-3n) ready for deployment on Android using the [MediaPipe LLM Inference API](https://ai.google.dev/edge/mediapipe/solutions/genai/llm_inference). The current checkpoint only supports text and vision input, with 4096 context length.", + "description": "[Gemma 3n E2B](https://ai.google.dev/gemma/docs/gemma-3n), txt + vis, 4096 ctx", "sizeInBytes": 3136226711, "estimatedPeakMemoryInBytes": 5905580032, "version": "20250520", @@ -16,13 +16,13 @@ "maxTokens": 4096, "accelerators": "cpu,gpu" }, - "taskTypes": ["llm_chat", "llm_prompt_lab", "llm_ask_image"] + "taskTypes": ["llm_chat", "llm_prompt_lab", "llm_ask_image", "group_chat"] }, { "name": "Gemma-3n-E4B-it-int4", "modelId": "google/gemma-3n-E4B-it-litert-preview", "modelFile": "gemma-3n-E4B-it-int4.task", - "description": "Preview version of [Gemma 3n E4B](https://ai.google.dev/gemma/docs/gemma-3n) ready for deployment on Android using the [MediaPipe LLM Inference API](https://ai.google.dev/edge/mediapipe/solutions/genai/llm_inference). 
The current checkpoint only supports text and vision input, with 4096 context length.", + "description": "[Gemma 3n E4B](https://ai.google.dev/gemma/docs/gemma-3n), txt + vis, 4096 ctx", "sizeInBytes": 4405655031, "estimatedPeakMemoryInBytes": 6979321856, "version": "20250520", @@ -34,13 +34,13 @@ "maxTokens": 4096, "accelerators": "cpu,gpu" }, - "taskTypes": ["llm_chat", "llm_prompt_lab", "llm_ask_image"] + "taskTypes": ["llm_chat", "llm_prompt_lab", "llm_ask_image", "group_chat"] }, { "name": "Gemma3-1B-IT q4", "modelId": "litert-community/Gemma3-1B-IT", "modelFile": "Gemma3-1B-IT_multi-prefill-seq_q4_ekv2048.task", - "description": "A variant of [google/Gemma-3-1B-IT](https://huggingface.co/google/Gemma-3-1B-IT) with 4-bit quantization ready for deployment on Android using the [MediaPipe LLM Inference API](https://ai.google.dev/edge/mediapipe/solutions/genai/llm_inference)", + "description": "[Gemma-3-1B-IT](https://huggingface.co/google/Gemma-3-1B-IT) 4-bit quant", "sizeInBytes": 554661246, "estimatedPeakMemoryInBytes": 2147483648, "version": "20250514", @@ -57,7 +57,7 @@ "name": "Qwen2.5-1.5B-Instruct q8", "modelId": "litert-community/Qwen2.5-1.5B-Instruct", "modelFile": "Qwen2.5-1.5B-Instruct_multi-prefill-seq_q8_ekv1280.task", - "description": "A variant of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) with 8-bit quantization ready for deployment on Android using the [MediaPipe LLM Inference API](https://ai.google.dev/edge/mediapipe/solutions/genai/llm_inference)", + "description": "[Qwen2.5-1.5B-It](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) 8-bit quant", "sizeInBytes": 1625493432, "estimatedPeakMemoryInBytes": 2684354560, "version": "20250514",