// Copyright 2025 The ODML Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
| namespace litert { | |
| namespace lm { | |
| namespace { | |
| using ::testing::status::StatusIs; | |
| TEST(LiteRtLmLibTest, RunLiteRtLmWithEmptyModelPathReturnsError) { | |
| LiteRtLmSettings settings; | |
| settings.model_path = ""; | |
| EXPECT_THAT(RunLiteRtLm(settings), | |
| StatusIs(absl::StatusCode::kInvalidArgument)); | |
| } | |
| // Following tests are for various model file metadata and tokenizer types. | |
| // They are not exhaustive, but designed to test a variety of scenarios. | |
| // If metadata or tokenizer types are not handled properly, these tests could | |
| // fail. | |
| TEST(LiteRtLmLibTest, RunLiteRtLmWithValidModelPath) { | |
| const auto model_path = | |
| std::filesystem::path(::testing::SrcDir()) / | |
| "litert_lm/runtime/testdata/test_lm.litertlm"; | |
| LiteRtLmSettings settings; | |
| settings.model_path = model_path.string(); | |
| settings.backend = "cpu"; | |
| // To save time on testing, and make sure we can end gracefully with this | |
| // test litertlm file, we only run 32 tokens. | |
| settings.max_num_tokens = 32; | |
| EXPECT_OK(RunLiteRtLm(settings)); | |
| } | |
| TEST(LiteRtLmLibTest, RunLiteRtLmWithInferredGemma3ModelType) { | |
| const auto model_path = | |
| std::filesystem::path(::testing::SrcDir()) / | |
| "litert_lm/runtime/testdata/test_lm_no_model_type.litertlm"; | |
| LiteRtLmSettings settings; | |
| settings.model_path = model_path.string(); | |
| settings.backend = "cpu"; | |
| // To save time on testing, and make sure we can end gracefully with this | |
| // test litertlm file, we only run 32 tokens. | |
| settings.max_num_tokens = 32; | |
| EXPECT_OK(RunLiteRtLm(settings)); | |
| } | |
| TEST(LiteRtLmLibTest, RunLiteRtLmWithDeepseekMetadataTokenizer) { | |
| const auto model_path = | |
| std::filesystem::path(::testing::SrcDir()) / | |
| "litert_lm/runtime/testdata/test_lm_deepseek_metadata_tokenizer.litertlm"; | |
| LiteRtLmSettings settings; | |
| settings.model_path = model_path.string(); | |
| settings.backend = "cpu"; | |
| // To save time on testing, and make sure we can end gracefully with this | |
| // test litertlm file, we only run 32 tokens. | |
| settings.max_num_tokens = 32; | |
| EXPECT_OK(RunLiteRtLm(settings)); | |
| } | |
| } // namespace | |
| } // namespace lm | |
| } // namespace litert | |