# embeddinggemma-300m-litertlm / embeddinggemma-300m.toml
# Uploaded by macmacmacmac via huggingface_hub (commit 96fa469, verified)
# EmbeddingGemma 300M LiteRT-LM Configuration
# Use this with the litertlm_builder_cli to create .litertlm file
#
# Usage:
# cd deps/LiteRT-LM
# bazel run //schema/py:litertlm_builder_cli -- \
# toml --path ../../models/embeddinggemma-300m.toml \
# output --path ../../models/embeddinggemma-300m.litertlm
# Container-wide metadata entries baked into the .litertlm header.
[system_metadata]
entries = [
    { key = "model_name", value_type = "String", value = "EmbeddingGemma-300M" },
    { key = "model_version", value_type = "String", value = "1.0" },
    { key = "embedding_dim", value_type = "Int32", value = 256 },
    { key = "author", value_type = "String", value = "Google" },
]
# Section 1: TFLite Embedder Model
# Payload section: embeds the TFLite flatbuffer into the .litertlm container.
[[section]]
section_type = "TFLiteModel"
model_type = "EMBEDDER"
# Use the seq512 version (best for tool descriptions)
# NOTE(review): absolute, machine-specific path — the build breaks on any
# other machine. Consider a path relative to this file; TODO confirm how
# litertlm_builder_cli resolves relative data_path values.
data_path = "/home/mac/git/mcp-agent/models/embeddinggemma-300M_seq512_mixed-precision.tflite"
# Per-section metadata. embedding_dimensions (256) matches embedding_dim in
# [system_metadata] above — presumably an MRL-truncated output size; verify
# against the exported model. max_seq_length matches the seq512 export.
additional_metadata = [
{ key = "embedding_dimensions", value_type = "Int32", value = 256 },
{ key = "max_seq_length", value_type = "Int32", value = 512 }
]
# Section 2: HuggingFace Tokenizer (if available)
# Uncomment if you have the tokenizer.json
# [[section]]
# section_type = "HF_Tokenizer"
# data_path = "tokenizer.json"