diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/__init__.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..68d66eb275e0b6fef2db1cdda810fe11e360aba9 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/__init__.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ..utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, +) + + +_import_structure = { + "agents": ["Agent", "AzureOpenAiAgent", "HfAgent", "LocalAgent", "OpenAiAgent"], + "base": ["PipelineTool", "RemoteTool", "Tool", "launch_gradio_demo", "load_tool"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["document_question_answering"] = ["DocumentQuestionAnsweringTool"] + _import_structure["image_captioning"] = ["ImageCaptioningTool"] + _import_structure["image_question_answering"] = ["ImageQuestionAnsweringTool"] + _import_structure["image_segmentation"] = ["ImageSegmentationTool"] + _import_structure["speech_to_text"] = ["SpeechToTextTool"] + _import_structure["text_classification"] = ["TextClassificationTool"] + _import_structure["text_question_answering"] = ["TextQuestionAnsweringTool"] + _import_structure["text_summarization"] = ["TextSummarizationTool"] + _import_structure["text_to_speech"] = ["TextToSpeechTool"] + _import_structure["translation"] = ["TranslationTool"] + +if TYPE_CHECKING: + from .agents import Agent, AzureOpenAiAgent, HfAgent, LocalAgent, OpenAiAgent + from .base import PipelineTool, RemoteTool, Tool, launch_gradio_demo, load_tool + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .document_question_answering import DocumentQuestionAnsweringTool + from .image_captioning import ImageCaptioningTool + from .image_question_answering import ImageQuestionAnsweringTool + from .image_segmentation import ImageSegmentationTool + from .speech_to_text import SpeechToTextTool + from .text_classification import TextClassificationTool + from .text_question_answering import TextQuestionAnsweringTool + from .text_summarization import TextSummarizationTool + from .text_to_speech import TextToSpeechTool + from .translation import TranslationTool +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
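The `_import_structure`/`_LazyModule` pattern in `__init__.py` above keeps `import transformers` cheap: a submodule is only imported when one of its exported names is first accessed, and the torch-only tools are simply absent from the structure when torch is missing. A minimal self-contained sketch of the same idea (the `LazyModule` class below is illustrative, not the actual `_LazyModule` implementation):

```py
import importlib
import types


class LazyModule(types.ModuleType):
    """Illustrative stand-in for transformers' `_LazyModule`."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [exported names]} into {exported name: submodule}.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        # Only runs for attributes not already set, i.e. on first access.
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later accesses skip __getattr__
        return value
```

With `sys.modules[__name__]` swapped for such an object, `from transformers.tools import load_tool` triggers the import of `transformers.tools.base` at that moment and not before.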
diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/agent_types.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/agent_types.py new file mode 100644 index 0000000000000000000000000000000000000000..f1c3261d57cacc0d0299467f0fa566340e4b5a94 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/agent_types.py @@ -0,0 +1,277 @@ +# coding=utf-8 +# Copyright 2023 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import pathlib +import tempfile +import uuid + +import numpy as np + +from ..utils import is_soundfile_availble, is_torch_available, is_vision_available, logging + + +logger = logging.get_logger(__name__) + +if is_vision_available(): + import PIL.Image + from PIL import Image + from PIL.Image import Image as ImageType +else: + ImageType = object + +if is_torch_available(): + import torch + +if is_soundfile_availble(): + import soundfile as sf
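The guarded imports above are what let this module be imported on installs without torch, PIL, or soundfile; each class then re-checks availability in its constructor before touching the backend. A hedged sketch of the same probe for an arbitrary optional dependency (the helper name here is made up, not part of transformers):

```py
import importlib.util


def is_matplotlib_available() -> bool:
    # Same spirit as is_torch_available()/is_vision_available(): probe for the
    # package without importing it, so module import never fails.
    return importlib.util.find_spec("matplotlib") is not None


if is_matplotlib_available():
    import matplotlib.pyplot as plt
else:
    plt = None  # callers must check first, the way AgentImage checks is_vision_available()
```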
+ + +class AgentType: + """ + Abstract class to be reimplemented to define types that can be returned by agents. + + These objects serve three purposes: + + - They behave as if they were the type they're meant to be, e.g., a string for text, a PIL.Image for images + - They can be stringified: calling str(object) returns a string defining the object + - They should be displayed correctly in ipython notebooks/colab/jupyter + """ + + def __init__(self, value): + self._value = value + + def __str__(self): + return self.to_string() + + def to_raw(self): + logger.error( + "This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable" + ) + return self._value + + def to_string(self) -> str: + logger.error( + "This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable" + ) + return str(self._value) + + +class AgentText(AgentType, str): + """ + Text type returned by the agent. Behaves as a string. + """ + + def to_raw(self): + return self._value + + def to_string(self): + return self._value + + +class AgentImage(AgentType, ImageType): + """ + Image type returned by the agent. Behaves as a PIL.Image. + """ + + def __init__(self, value): + super().__init__(value) + + if not is_vision_available(): + raise ImportError("PIL must be installed in order to handle images.") + + self._path = None + self._raw = None + self._tensor = None + + if isinstance(value, ImageType): + self._raw = value + elif isinstance(value, (str, pathlib.Path)): + self._path = value + elif isinstance(value, torch.Tensor): + self._tensor = value + else: + raise ValueError(f"Unsupported type for {self.__class__.__name__}: {type(value)}") + + def _ipython_display_(self, include=None, exclude=None): + """ + Correctly displays this type in an ipython notebook (ipython, colab, jupyter, ...) + """ + from IPython.display import Image, display + + display(Image(self.to_string())) + + def to_raw(self): + """ + Returns the "raw" version of that object. In the case of an AgentImage, it is a PIL.Image. + """ + if self._raw is not None: + return self._raw + + if self._path is not None: + self._raw = Image.open(self._path) + return self._raw + + def to_string(self): + """ + Returns the stringified version of that object. In the case of an AgentImage, it is a path to the serialized + version of the image. + """ + if self._path is not None: + return self._path + + if self._raw is not None: + directory = tempfile.mkdtemp() + self._path = os.path.join(directory, str(uuid.uuid4()) + ".png") + self._raw.save(self._path) + + return self._path + + if self._tensor is not None: + array = self._tensor.cpu().detach().numpy() + + # There is likely a simpler way than converting the tensor to an image just to save it + img = Image.fromarray((array * 255).astype(np.uint8)) + + directory = tempfile.mkdtemp() + self._path = os.path.join(directory, str(uuid.uuid4()) + ".png") + + img.save(self._path) + + return self._path
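`AgentImage` keeps whichever representation it was built from and converts lazily: `to_raw()` yields a `PIL.Image`, while `to_string()` yields a filesystem path, serializing to a temporary `.png` only on demand. A short usage sketch, assuming PIL is installed and this vendored module is importable:

```py
import numpy as np
from PIL import Image

from transformers.tools.agent_types import AgentImage

raw = Image.fromarray(np.zeros((8, 8, 3), dtype=np.uint8))
wrapped = AgentImage(raw)

assert wrapped.to_raw() is raw  # raw view: the original PIL.Image
path = wrapped.to_string()      # string view: path of the lazily written .png
print(path)
```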
+ """ + from IPython.display import Audio, display + + display(Audio(self.to_string(), rate=self.samplerate)) + + def to_raw(self): + """ + Returns the "raw" version of that object. It is a `torch.Tensor` object. + """ + if self._tensor is not None: + return self._tensor + + if self._path is not None: + tensor, self.samplerate = sf.read(self._path) + self._tensor = torch.tensor(tensor) + return self._tensor + + def to_string(self): + """ + Returns the stringified version of that object. In the case of an AgentAudio, it is a path to the serialized + version of the audio. + """ + if self._path is not None: + return self._path + + if self._tensor is not None: + directory = tempfile.mkdtemp() + self._path = os.path.join(directory, str(uuid.uuid4()) + ".wav") + sf.write(self._path, self._tensor, samplerate=self.samplerate) + return self._path + + +AGENT_TYPE_MAPPING = {"text": AgentText, "image": AgentImage, "audio": AgentAudio} +INSTANCE_TYPE_MAPPING = {str: AgentText} + +if is_vision_available(): + INSTANCE_TYPE_MAPPING[PIL.Image] = AgentImage + + +def handle_agent_inputs(*args, **kwargs): + args = [(arg.to_raw() if isinstance(arg, AgentType) else arg) for arg in args] + kwargs = {k: (v.to_raw() if isinstance(v, AgentType) else v) for k, v in kwargs.items()} + return args, kwargs + + +def handle_agent_outputs(outputs, output_types=None): + if isinstance(outputs, dict): + decoded_outputs = {} + for i, (k, v) in enumerate(outputs.items()): + if output_types is not None: + # If the class has defined outputs, we can map directly according to the class definition + if output_types[i] in AGENT_TYPE_MAPPING: + decoded_outputs[k] = AGENT_TYPE_MAPPING[output_types[i]](v) + else: + decoded_outputs[k] = AgentType(v) + + else: + # If the class does not have defined output, then we map according to the type + for _k, _v in INSTANCE_TYPE_MAPPING.items(): + if isinstance(v, _k): + decoded_outputs[k] = _v(v) + if k not in decoded_outputs: + decoded_outputs[k] = AgentType[v] + + elif isinstance(outputs, (list, tuple)): + decoded_outputs = type(outputs)() + for i, v in enumerate(outputs): + if output_types is not None: + # If the class has defined outputs, we can map directly according to the class definition + if output_types[i] in AGENT_TYPE_MAPPING: + decoded_outputs.append(AGENT_TYPE_MAPPING[output_types[i]](v)) + else: + decoded_outputs.append(AgentType(v)) + else: + # If the class does not have defined output, then we map according to the type + found = False + for _k, _v in INSTANCE_TYPE_MAPPING.items(): + if isinstance(v, _k): + decoded_outputs.append(_v(v)) + found = True + + if not found: + decoded_outputs.append(AgentType(v)) + + else: + if output_types[0] in AGENT_TYPE_MAPPING: + # If the class has defined outputs, we can map directly according to the class definition + decoded_outputs = AGENT_TYPE_MAPPING[output_types[0]](outputs) + + else: + # If the class does not have defined output, then we map according to the type + for _k, _v in INSTANCE_TYPE_MAPPING.items(): + if isinstance(outputs, _k): + return _v(outputs) + return AgentType(outputs) + + return decoded_outputs diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/agents.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/agents.py new file mode 100644 index 0000000000000000000000000000000000000000..3e423ebb30556d3a73330dec3316d066ca4d44dd --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/agents.py @@ -0,0 +1,771 @@ +#!/usr/bin/env python +# coding=utf-8 + +# 
Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib.util +import json +import os +import time +from dataclasses import dataclass +from typing import Dict + +import requests +from huggingface_hub import HfFolder, hf_hub_download, list_spaces + +from ..models.auto import AutoTokenizer +from ..utils import is_offline_mode, is_openai_available, is_torch_available, logging +from .base import TASK_MAPPING, TOOL_CONFIG_FILE, Tool, load_tool, supports_remote +from .prompts import CHAT_MESSAGE_PROMPT, download_prompt +from .python_interpreter import evaluate + + +logger = logging.get_logger(__name__) + + +if is_openai_available(): + import openai + +if is_torch_available(): + from ..generation import StoppingCriteria, StoppingCriteriaList + from ..models.auto import AutoModelForCausalLM +else: + StoppingCriteria = object + +_tools_are_initialized = False + + +BASE_PYTHON_TOOLS = { + "print": print, + "range": range, + "float": float, + "int": int, + "bool": bool, + "str": str, +} + + +@dataclass +class PreTool: + task: str + description: str + repo_id: str + + +HUGGINGFACE_DEFAULT_TOOLS = {} + + +HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB = [ + "image-transformation", + "text-download", + "text-to-image", + "text-to-video", +] + + +def get_remote_tools(organization="huggingface-tools"): + if is_offline_mode(): + logger.info("You are in offline mode, so remote tools are not available.") + return {} + + spaces = list_spaces(author=organization) + tools = {} + for space_info in spaces: + repo_id = space_info.id + resolved_config_file = hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="space") + with open(resolved_config_file, encoding="utf-8") as reader: + config = json.load(reader) + + task = repo_id.split("/")[-1] + tools[config["name"]] = PreTool(task=task, description=config["description"], repo_id=repo_id) + + return tools + + +def _setup_default_tools(): + global HUGGINGFACE_DEFAULT_TOOLS + global _tools_are_initialized + + if _tools_are_initialized: + return + + main_module = importlib.import_module("transformers") + tools_module = main_module.tools + + remote_tools = get_remote_tools() + for task_name, tool_class_name in TASK_MAPPING.items(): + tool_class = getattr(tools_module, tool_class_name) + description = tool_class.description + HUGGINGFACE_DEFAULT_TOOLS[tool_class.name] = PreTool(task=task_name, description=description, repo_id=None) + + if not is_offline_mode(): + for task_name in HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB: + found = False + for tool_name, tool in remote_tools.items(): + if tool.task == task_name: + HUGGINGFACE_DEFAULT_TOOLS[tool_name] = tool + found = True + break + + if not found: + raise ValueError(f"{task_name} is not implemented on the Hub.") + + _tools_are_initialized = True + + +def resolve_tools(code, toolbox, remote=False, cached_tools=None): + if cached_tools is None: + resolved_tools = BASE_PYTHON_TOOLS.copy() + else: + resolved_tools = cached_tools + for name, tool in toolbox.items(): + if name 
not in code or name in resolved_tools: + continue + + if isinstance(tool, Tool): + resolved_tools[name] = tool + else: + task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id + _remote = remote and supports_remote(task_or_repo_id) + resolved_tools[name] = load_tool(task_or_repo_id, remote=_remote) + + return resolved_tools + + +def get_tool_creation_code(code, toolbox, remote=False): + code_lines = ["from transformers import load_tool", ""] + for name, tool in toolbox.items(): + if name not in code or isinstance(tool, Tool): + continue + + task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id + line = f'{name} = load_tool("{task_or_repo_id}"' + if remote: + line += ", remote=True" + line += ")" + code_lines.append(line) + + return "\n".join(code_lines) + "\n" + + +def clean_code_for_chat(result): + lines = result.split("\n") + idx = 0 + while idx < len(lines) and not lines[idx].lstrip().startswith("```"): + idx += 1 + explanation = "\n".join(lines[:idx]).strip() + if idx == len(lines): + return explanation, None + + idx += 1 + start_idx = idx + # Guard against replies whose code fence is never closed. + while idx < len(lines) and not lines[idx].lstrip().startswith("```"): + idx += 1 + code = "\n".join(lines[start_idx:idx]).strip() + + return explanation, code + + +def clean_code_for_run(result): + result = f"I will use the following {result}" + explanation, code = result.split("Answer:", 1) + explanation = explanation.strip() + code = code.strip() + + code_lines = code.split("\n") + if code_lines[0] in ["```", "```py", "```python"]: + code_lines = code_lines[1:] + if code_lines[-1] == "```": + code_lines = code_lines[:-1] + code = "\n".join(code_lines) + + return explanation, code
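`clean_code_for_chat` above splits an LLM reply at the first fenced block: everything before the fence is the explanation, everything inside it is the code (or `None` when no fence is found). A quick check of that contract:

```py
reply = "\n".join(
    [
        "I will draw the picture for you.",
        "",
        "```py",
        'image = image_generator(prompt="rivers and lakes")',
        "```",
    ]
)

explanation, code = clean_code_for_chat(reply)
assert explanation == "I will draw the picture for you."
assert code == 'image = image_generator(prompt="rivers and lakes")'
```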
+ """ + + def __init__(self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None): + _setup_default_tools() + + agent_name = self.__class__.__name__ + self.chat_prompt_template = download_prompt(chat_prompt_template, agent_name, mode="chat") + self.run_prompt_template = download_prompt(run_prompt_template, agent_name, mode="run") + self._toolbox = HUGGINGFACE_DEFAULT_TOOLS.copy() + self.log = print + if additional_tools is not None: + if isinstance(additional_tools, (list, tuple)): + additional_tools = {t.name: t for t in additional_tools} + elif not isinstance(additional_tools, dict): + additional_tools = {additional_tools.name: additional_tools} + + replacements = {name: tool for name, tool in additional_tools.items() if name in HUGGINGFACE_DEFAULT_TOOLS} + self._toolbox.update(additional_tools) + if len(replacements) > 1: + names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()]) + logger.warning( + f"The following tools have been replaced by the ones provided in `additional_tools`:\n{names}." + ) + elif len(replacements) == 1: + name = list(replacements.keys())[0] + logger.warning(f"{name} has been replaced by {replacements[name]} as provided in `additional_tools`.") + + self.prepare_for_new_chat() + + @property + def toolbox(self) -> Dict[str, Tool]: + """Get all tool currently available to the agent""" + return self._toolbox + + def format_prompt(self, task, chat_mode=False): + description = "\n".join([f"- {name}: {tool.description}" for name, tool in self.toolbox.items()]) + if chat_mode: + if self.chat_history is None: + prompt = self.chat_prompt_template.replace("<>", description) + else: + prompt = self.chat_history + prompt += CHAT_MESSAGE_PROMPT.replace("<>", task) + else: + prompt = self.run_prompt_template.replace("<>", description) + prompt = prompt.replace("<>", task) + return prompt + + def set_stream(self, streamer): + """ + Set the function use to stream results (which is `print` by default). + + Args: + streamer (`callable`): The function to call when streaming results from the LLM. + """ + self.log = streamer + + def chat(self, task, *, return_code=False, remote=False, **kwargs): + """ + Sends a new request to the agent in a chat. Will use the previous ones in its history. + + Args: + task (`str`): The task to perform + return_code (`bool`, *optional*, defaults to `False`): + Whether to just return code and not evaluate it. + remote (`bool`, *optional*, defaults to `False`): + Whether or not to use remote tools (inference endpoints) instead of local ones. + kwargs (additional keyword arguments, *optional*): + Any keyword argument to send to the agent when evaluating the code. 
+ + def chat(self, task, *, return_code=False, remote=False, **kwargs): + """ + Sends a new request to the agent in a chat. Will use the previous ones in its history. + + Args: + task (`str`): The task to perform + return_code (`bool`, *optional*, defaults to `False`): + Whether to just return code and not evaluate it. + remote (`bool`, *optional*, defaults to `False`): + Whether or not to use remote tools (inference endpoints) instead of local ones. + kwargs (additional keyword arguments, *optional*): + Any keyword argument to send to the agent when evaluating the code. + + Example: + + ```py + from transformers import HfAgent + + agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") + agent.chat("Draw me a picture of rivers and lakes") + + agent.chat("Transform the picture so that there is a rock in there") + ``` + """ + prompt = self.format_prompt(task, chat_mode=True) + result = self.generate_one(prompt, stop=["Human:", "====="]) + self.chat_history = prompt + result.strip() + "\n" + explanation, code = clean_code_for_chat(result) + + self.log(f"==Explanation from the agent==\n{explanation}") + + if code is not None: + self.log(f"\n\n==Code generated by the agent==\n{code}") + if not return_code: + self.log("\n\n==Result==") + self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools) + self.chat_state.update(kwargs) + return evaluate(code, self.cached_tools, self.chat_state, chat_mode=True) + else: + tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) + return f"{tool_code}\n{code}" + + def prepare_for_new_chat(self): + """ + Clears the history of prior calls to [`~Agent.chat`]. + """ + self.chat_history = None + self.chat_state = {} + self.cached_tools = None + + def run(self, task, *, return_code=False, remote=False, **kwargs): + """ + Sends a request to the agent. + + Args: + task (`str`): The task to perform + return_code (`bool`, *optional*, defaults to `False`): + Whether to just return code and not evaluate it. + remote (`bool`, *optional*, defaults to `False`): + Whether or not to use remote tools (inference endpoints) instead of local ones. + kwargs (additional keyword arguments, *optional*): + Any keyword argument to send to the agent when evaluating the code. + + Example: + + ```py + from transformers import HfAgent + + agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") + agent.run("Draw me a picture of rivers and lakes") + ``` + """ + prompt = self.format_prompt(task) + result = self.generate_one(prompt, stop=["Task:"]) + explanation, code = clean_code_for_run(result) + + self.log(f"==Explanation from the agent==\n{explanation}") + + self.log(f"\n\n==Code generated by the agent==\n{code}") + if not return_code: + self.log("\n\n==Result==") + self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools) + return evaluate(code, self.cached_tools, state=kwargs.copy()) + else: + tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) + return f"{tool_code}\n{code}" + + def generate_one(self, prompt, stop): + # This is the method to implement in your custom agent. + raise NotImplementedError + + def generate_many(self, prompts, stop): + # Override if you have a way to do batch generation faster than one by one + return [self.generate_one(prompt, stop) for prompt in prompts]
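Every concrete agent below only overrides `generate_one` (and optionally `generate_many`); `chat` and `run` never talk to a model directly. A hedged sketch of the smallest possible backend (the canned reply is a stand-in for a real LLM call, not part of the library):

```py
class CannedAgent(Agent):
    """Toy agent that returns a fixed completion instead of querying a model."""

    def generate_one(self, prompt, stop):
        # A real backend would send `prompt` to an LLM and stop at the first
        # of the `stop` sequences, as HfAgent and OpenAiAgent do below.
        return 'I will print a greeting.\n\nAnswer:\n```py\nprint("hello")\n```\n'


agent = CannedAgent()
print(agent.run("Say hello", return_code=True))  # returns the generated code as a string
```

Note that instantiating any `Agent` subclass still runs `_setup_default_tools()`, which may hit the Hub to resolve the default toolbox unless offline mode is enabled.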
+ + +class OpenAiAgent(Agent): + """ + Agent that uses the openai API to generate code. + + <Tip warning={true}> + + The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like + `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a future version. + + </Tip> + + Args: + model (`str`, *optional*, defaults to `"text-davinci-003"`): + The name of the OpenAI model to use. + api_key (`str`, *optional*): + The API key to use. If unset, will look for the environment variable `"OPENAI_API_KEY"`. + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. + additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): + Any additional tools to include on top of the default ones. If you pass along a tool with the same name as + one of the default tools, that default tool will be overridden. + + Example: + + ```py + from transformers import OpenAiAgent + + agent = OpenAiAgent(model="text-davinci-003", api_key=xxx) + agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!") + ``` + """ + + def __init__( + self, + model="text-davinci-003", + api_key=None, + chat_prompt_template=None, + run_prompt_template=None, + additional_tools=None, + ): + if not is_openai_available(): + raise ImportError("Using `OpenAiAgent` requires `openai`: `pip install openai`.") + + if api_key is None: + api_key = os.environ.get("OPENAI_API_KEY", None) + if api_key is None: + raise ValueError( + "You need an openai key to use `OpenAIAgent`. You can get one here: " + "https://openai.com/api/. If you have one, set it in your env with `os.environ['OPENAI_API_KEY'] = " + "xxx`." + ) + else: + openai.api_key = api_key + self.model = model + super().__init__( + chat_prompt_template=chat_prompt_template, + run_prompt_template=run_prompt_template, + additional_tools=additional_tools, + ) + + def generate_many(self, prompts, stop): + if "gpt" in self.model: + return [self._chat_generate(prompt, stop) for prompt in prompts] + else: + return self._completion_generate(prompts, stop) + + def generate_one(self, prompt, stop): + if "gpt" in self.model: + return self._chat_generate(prompt, stop) + else: + return self._completion_generate([prompt], stop)[0] + + def _chat_generate(self, prompt, stop): + result = openai.chat.completions.create( + model=self.model, + messages=[{"role": "user", "content": prompt}], + temperature=0, + stop=stop, + ) + return result.choices[0].message.content + + def _completion_generate(self, prompts, stop): + result = openai.Completion.create( + model=self.model, + prompt=prompts, + temperature=0, + stop=stop, + max_tokens=200, + ) + return [answer["text"] for answer in result["choices"]] + + +class AzureOpenAiAgent(Agent): + """ + Agent that uses Azure OpenAI to generate code. See the [official + documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) to learn how to deploy an openAI + model on Azure. + + <Tip warning={true}> + + The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like + `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a future version. + + </Tip> + + Args: + deployment_id (`str`): + The name of the deployed Azure openAI model to use. + api_key (`str`, *optional*): + The API key to use. If unset, will look for the environment variable `"AZURE_OPENAI_API_KEY"`. + resource_name (`str`, *optional*): + The name of your Azure OpenAI Resource. If unset, will look for the environment variable + `"AZURE_OPENAI_RESOURCE_NAME"`.
+ api_version (`str`, *optional*, defaults to `"2022-12-01"`): + The API version to use for this agent. + is_chat_model (`bool`, *optional*): + Whether you are using a completion model or a chat model (see note above, chat models won't be as + efficient). Will default to whether `gpt` is in the `deployment_id` or not. + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. + additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): + Any additional tools to include on top of the default ones. If you pass along a tool with the same name as + one of the default tools, that default tool will be overridden. + + Example: + + ```py + from transformers import AzureOpenAiAgent + + agent = AzureOpenAiAgent(deployment_id="Davinci-003", api_key=xxx, resource_name=yyy) + agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!") + ``` + """ + + def __init__( + self, + deployment_id, + api_key=None, + resource_name=None, + api_version="2022-12-01", + is_chat_model=None, + chat_prompt_template=None, + run_prompt_template=None, + additional_tools=None, + ): + if not is_openai_available(): + raise ImportError("Using `AzureOpenAiAgent` requires `openai`: `pip install openai`.") + + self.deployment_id = deployment_id + openai.api_type = "azure" + if api_key is None: + api_key = os.environ.get("AZURE_OPENAI_API_KEY", None) + if api_key is None: + raise ValueError( + "You need an Azure openAI key to use `AzureOpenAIAgent`. If you have one, set it in your env with " + "`os.environ['AZURE_OPENAI_API_KEY'] = xxx`." + ) + else: + openai.api_key = api_key + if resource_name is None: + resource_name = os.environ.get("AZURE_OPENAI_RESOURCE_NAME", None) + if resource_name is None: + raise ValueError( + "You need a resource_name to use `AzureOpenAIAgent`. If you have one, set it in your env with " + "`os.environ['AZURE_OPENAI_RESOURCE_NAME'] = xxx`."
+ ) + else: + openai.api_base = f"https://{resource_name}.openai.azure.com" + openai.api_version = api_version + + if is_chat_model is None: + is_chat_model = "gpt" in deployment_id.lower() + self.is_chat_model = is_chat_model + + super().__init__( + chat_prompt_template=chat_prompt_template, + run_prompt_template=run_prompt_template, + additional_tools=additional_tools, + ) + + def generate_many(self, prompts, stop): + if self.is_chat_model: + return [self._chat_generate(prompt, stop) for prompt in prompts] + else: + return self._completion_generate(prompts, stop) + + def generate_one(self, prompt, stop): + if self.is_chat_model: + return self._chat_generate(prompt, stop) + else: + return self._completion_generate([prompt], stop)[0] + + def _chat_generate(self, prompt, stop): + result = openai.ChatCompletion.create( + engine=self.deployment_id, + messages=[{"role": "user", "content": prompt}], + temperature=0, + stop=stop, + ) + return result["choices"][0]["message"]["content"] + + def _completion_generate(self, prompts, stop): + result = openai.Completion.create( + engine=self.deployment_id, + prompt=prompts, + temperature=0, + stop=stop, + max_tokens=200, + ) + return [answer["text"] for answer in result["choices"]] + + +class HfAgent(Agent): + """ + Agent that uses an inference endpoint to generate code. + + Args: + url_endpoint (`str`): + The name of the url endpoint to use. + token (`str`, *optional*): + The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when + running `huggingface-cli login` (stored in `~/.huggingface`). + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. + additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): + Any additional tools to include on top of the default ones. If you pass along a tool with the same name as + one of the default tools, that default tool will be overridden. 
+
+    Example:
+
+    ```py
+    from transformers import HfAgent
+
+    agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
+    agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
+    ```
+    """
+
+    def __init__(
+        self, url_endpoint, token=None, chat_prompt_template=None, run_prompt_template=None, additional_tools=None
+    ):
+        self.url_endpoint = url_endpoint
+        if token is None:
+            self.token = f"Bearer {HfFolder().get_token()}"
+        elif token.startswith("Bearer") or token.startswith("Basic"):
+            self.token = token
+        else:
+            self.token = f"Bearer {token}"
+        super().__init__(
+            chat_prompt_template=chat_prompt_template,
+            run_prompt_template=run_prompt_template,
+            additional_tools=additional_tools,
+        )
+
+    def generate_one(self, prompt, stop):
+        headers = {"Authorization": self.token}
+        inputs = {
+            "inputs": prompt,
+            "parameters": {"max_new_tokens": 200, "return_full_text": False, "stop": stop},
+        }
+
+        response = requests.post(self.url_endpoint, json=inputs, headers=headers)
+        if response.status_code == 429:
+            logger.info("Getting rate-limited, waiting a tiny bit before trying again.")
+            time.sleep(1)
+            return self.generate_one(prompt, stop)
+        elif response.status_code != 200:
+            raise ValueError(f"Error {response.status_code}: {response.json()}")
+
+        result = response.json()[0]["generated_text"]
+        # Inference API returns the stop sequence
+        for stop_seq in stop:
+            if result.endswith(stop_seq):
+                return result[: -len(stop_seq)]
+        return result
+
+
+class LocalAgent(Agent):
+    """
+    Agent that uses a local model and tokenizer to generate code.
+
+    Args:
+        model ([`PreTrainedModel`]):
+            The model to use for the agent.
+        tokenizer ([`PreTrainedTokenizer`]):
+            The tokenizer to use for the agent.
+        chat_prompt_template (`str`, *optional*):
+            Pass along your own prompt if you want to override the default template for the `chat` method. Can be the
+            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
+            `chat_prompt_template.txt` in this repo in this case.
+        run_prompt_template (`str`, *optional*):
+            Pass along your own prompt if you want to override the default template for the `run` method. Can be the
+            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
+            `run_prompt_template.txt` in this repo in this case.
+        additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
+            Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
+            one of the default tools, that default tool will be overridden.
+
+    Example:
+
+    ```py
+    import torch
+    from transformers import AutoModelForCausalLM, AutoTokenizer, LocalAgent
+
+    checkpoint = "bigcode/starcoder"
+    model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
+    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+
+    agent = LocalAgent(model, tokenizer)
+    agent.run("Draw me a picture of rivers and lakes.")
+    ```
+    """
+
+    def __init__(self, model, tokenizer, chat_prompt_template=None, run_prompt_template=None, additional_tools=None):
+        self.model = model
+        self.tokenizer = tokenizer
+        super().__init__(
+            chat_prompt_template=chat_prompt_template,
+            run_prompt_template=run_prompt_template,
+            additional_tools=additional_tools,
+        )
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+        """
+        Convenience method to build a `LocalAgent` from a pretrained checkpoint.
+
+        Args:
+            pretrained_model_name_or_path (`str` or `os.PathLike`):
+                The name of a repo on the Hub or a local path to a folder containing both model and tokenizer.
+            kwargs (`Dict[str, Any]`, *optional*):
+                Keyword arguments passed along to [`~PreTrainedModel.from_pretrained`].
+
+        Example:
+
+        ```py
+        import torch
+        from transformers import LocalAgent
+
+        agent = LocalAgent.from_pretrained("bigcode/starcoder", device_map="auto", torch_dtype=torch.bfloat16)
+        agent.run("Draw me a picture of rivers and lakes.")
+        ```
+        """
+        model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, **kwargs)
+        tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
+        return cls(model, tokenizer)
+
+    @property
+    def _model_device(self):
+        if hasattr(self.model, "hf_device_map"):
+            return list(self.model.hf_device_map.values())[0]
+        for param in self.model.parameters():
+            return param.device
+
+    def generate_one(self, prompt, stop):
+        encoded_inputs = self.tokenizer(prompt, return_tensors="pt").to(self._model_device)
+        src_len = encoded_inputs["input_ids"].shape[1]
+        stopping_criteria = StoppingCriteriaList([StopSequenceCriteria(stop, self.tokenizer)])
+        outputs = self.model.generate(
+            encoded_inputs["input_ids"], max_new_tokens=200, stopping_criteria=stopping_criteria
+        )
+
+        result = self.tokenizer.decode(outputs[0].tolist()[src_len:])
+        # The generated text may end with the stop sequence: remove it if so.
+        for stop_seq in stop:
+            if result.endswith(stop_seq):
+                result = result[: -len(stop_seq)]
+        return result
+
+
+class StopSequenceCriteria(StoppingCriteria):
+    """
+    This class can be used to stop generation whenever a sequence of tokens is encountered.
+
+    Args:
+        stop_sequences (`str` or `List[str]`):
+            The sequence (or list of sequences) on which to stop execution.
+        tokenizer:
+            The tokenizer used to decode the model outputs.
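+
+    Example (a minimal sketch; the checkpoint and prompt are illustrative):
+
+    ```py
+    from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteriaList
+
+    tokenizer = AutoTokenizer.from_pretrained("gpt2")
+    model = AutoModelForCausalLM.from_pretrained("gpt2")
+
+    inputs = tokenizer("Task: say hello.\nAnswer:", return_tensors="pt")
+    # Stop as soon as the decoded output ends with "Task:", i.e. when the model starts a new block.
+    criteria = StoppingCriteriaList([StopSequenceCriteria("Task:", tokenizer)])
+    outputs = model.generate(**inputs, max_new_tokens=50, stopping_criteria=criteria)
+    print(tokenizer.decode(outputs[0]))
+    ```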
+ """ + + def __init__(self, stop_sequences, tokenizer): + if isinstance(stop_sequences, str): + stop_sequences = [stop_sequences] + self.stop_sequences = stop_sequences + self.tokenizer = tokenizer + + def __call__(self, input_ids, scores, **kwargs) -> bool: + decoded_output = self.tokenizer.decode(input_ids.tolist()[0]) + return any(decoded_output.endswith(stop_sequence) for stop_sequence in self.stop_sequences) diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/base.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/base.py new file mode 100644 index 0000000000000000000000000000000000000000..4042b28ac64c09a2b47ff0fa8c85a8293e33f618 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/base.py @@ -0,0 +1,757 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import base64 +import importlib +import inspect +import io +import json +import os +import tempfile +from typing import Any, Dict, List, Optional, Union + +from huggingface_hub import create_repo, hf_hub_download, metadata_update, upload_folder +from huggingface_hub.utils import RepositoryNotFoundError, build_hf_headers, get_session + +from ..dynamic_module_utils import custom_object_save, get_class_from_dynamic_module, get_imports +from ..image_utils import is_pil_image +from ..models.auto import AutoProcessor +from ..utils import ( + CONFIG_NAME, + cached_file, + is_accelerate_available, + is_torch_available, + is_vision_available, + logging, +) +from .agent_types import handle_agent_inputs, handle_agent_outputs + + +logger = logging.get_logger(__name__) + +if is_torch_available(): + import torch + +if is_accelerate_available(): + from accelerate.utils import send_to_device + + +TOOL_CONFIG_FILE = "tool_config.json" + + +def get_repo_type(repo_id, repo_type=None, **hub_kwargs): + if repo_type is not None: + return repo_type + try: + hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="space", **hub_kwargs) + return "space" + except RepositoryNotFoundError: + try: + hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="model", **hub_kwargs) + return "model" + except RepositoryNotFoundError: + raise EnvironmentError(f"`{repo_id}` does not seem to be a valid repo identifier on the Hub.") + except Exception: + return "model" + except Exception: + return "space" + + +# docstyle-ignore +APP_FILE_TEMPLATE = """from transformers import launch_gradio_demo +from {module_name} import {class_name} + +launch_gradio_demo({class_name}) +""" + + +class Tool: + """ + A base class for the functions used by the agent. Subclass this and implement the `__call__` method as well as the + following class attributes: + + - **description** (`str`) -- A short description of what your tool does, the inputs it expects and the output(s) it + will return. For instance 'This is a tool that downloads a file from a `url`. 
It takes the `url` as input, and
+      returns the text contained in the file'.
+    - **name** (`str`) -- A descriptive name that will be used for your tool in the prompt to the agent. For instance
+      `"text-classifier"` or `"image_generator"`.
+    - **inputs** (`List[str]`) -- The list of modalities expected for the inputs (in the same order as in the call).
+      Modalities should be `"text"`, `"image"` or `"audio"`. This is only used by `launch_gradio_demo` or to make a
+      nice space from your tool.
+    - **outputs** (`List[str]`) -- The list of modalities returned by the tool (in the same order as the return of the
+      call method). Modalities should be `"text"`, `"image"` or `"audio"`. This is only used by `launch_gradio_demo`
+      or to make a nice space from your tool.
+
+    You can also override the method [`~Tool.setup`] if your tool has an expensive operation to perform before being
+    usable (such as loading a model). [`~Tool.setup`] will be called the first time you use your tool, but not at
+    instantiation.
+    """
+
+    description: str = "This is a tool that ..."
+    name: str = ""
+
+    inputs: List[str]
+    outputs: List[str]
+
+    def __init__(self, *args, **kwargs):
+        self.is_initialized = False
+
+    def __call__(self, *args, **kwargs):
+        raise NotImplementedError("Write this method in your subclass of `Tool`.")
+
+    def setup(self):
+        """
+        Overwrite this method here for any operation that is expensive and needs to be executed before you start using
+        your tool, such as loading a big model.
+        """
+        self.is_initialized = True
+
+    def save(self, output_dir):
+        """
+        Saves the relevant code files for your tool so it can be pushed to the Hub. This will copy the code of your
+        tool in `output_dir` as well as autogenerate:
+
+        - a config file named `tool_config.json`
+        - an `app.py` file so that your tool can be converted to a space
+        - a `requirements.txt` containing the names of the modules used by your tool (as detected when inspecting its
+          code)
+
+        You should only use this method to save tools that are defined in a separate module (not `__main__`).
+
+        Args:
+            output_dir (`str`): The folder in which you want to save your tool.
+        """
+        os.makedirs(output_dir, exist_ok=True)
+        # Save module file
+        if self.__module__ == "__main__":
+            raise ValueError(
+                f"We can't save the code defining {self} in {output_dir} as it's been defined in __main__. You "
+                "have to put this code in a separate module so we can include it in the saved folder."
+            )
+        module_files = custom_object_save(self, output_dir)
+
+        module_name = self.__class__.__module__
+        last_module = module_name.split(".")[-1]
+        full_name = f"{last_module}.{self.__class__.__name__}"
+
+        # Save config file
+        config_file = os.path.join(output_dir, "tool_config.json")
+        if os.path.isfile(config_file):
+            with open(config_file, "r", encoding="utf-8") as f:
+                tool_config = json.load(f)
+        else:
+            tool_config = {}
+
+        # Update (rather than discard) any existing config with the current class, description and name.
+        tool_config.update({"tool_class": full_name, "description": self.description, "name": self.name})
+        with open(config_file, "w", encoding="utf-8") as f:
+            f.write(json.dumps(tool_config, indent=2, sort_keys=True) + "\n")
+
+        # Save app file
+        app_file = os.path.join(output_dir, "app.py")
+        with open(app_file, "w", encoding="utf-8") as f:
+            f.write(APP_FILE_TEMPLATE.format(module_name=last_module, class_name=self.__class__.__name__))
+
+        # Save requirements file
+        requirements_file = os.path.join(output_dir, "requirements.txt")
+        imports = []
+        for module in module_files:
+            imports.extend(get_imports(module))
+        imports = list(set(imports))
+        with open(requirements_file, "w", encoding="utf-8") as f:
+            f.write("\n".join(imports) + "\n")
+
+    @classmethod
+    def from_hub(
+        cls,
+        repo_id: str,
+        model_repo_id: Optional[str] = None,
+        token: Optional[str] = None,
+        remote: bool = False,
+        **kwargs,
+    ):
+        """
+        Loads a tool defined on the Hub.
+
+        Args:
+            repo_id (`str`):
+                The name of the repo on the Hub where your tool is defined.
+            model_repo_id (`str`, *optional*):
+                If your tool uses a model and you want to use a different model than the default, you can pass a second
+                repo ID or an endpoint url to this argument.
+            token (`str`, *optional*):
+                The token to identify you on hf.co. If unset, will use the token generated when running
+                `huggingface-cli login` (stored in `~/.huggingface`).
+            remote (`bool`, *optional*, defaults to `False`):
+                Whether to use your tool by downloading the model or (if it is available) with an inference endpoint.
+            kwargs (additional keyword arguments, *optional*):
+                Additional keyword arguments that will be split in two: all arguments relevant to the Hub (such as
+                `cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the
+                others will be passed along to its init.
+        """
+        if remote and model_repo_id is None:
+            endpoints = get_default_endpoints()
+            if repo_id not in endpoints:
+                raise ValueError(
+                    f"Could not infer a default endpoint for {repo_id}, you need to pass one using the "
+                    "`model_repo_id` argument."
+                )
+            model_repo_id = endpoints[repo_id]
+        hub_kwargs_names = [
+            "cache_dir",
+            "force_download",
+            "resume_download",
+            "proxies",
+            "revision",
+            "repo_type",
+            "subfolder",
+            "local_files_only",
+        ]
+        hub_kwargs = {k: v for k, v in kwargs.items() if k in hub_kwargs_names}
+
+        # Try to get the tool config first.
+ hub_kwargs["repo_type"] = get_repo_type(repo_id, **hub_kwargs) + resolved_config_file = cached_file( + repo_id, + TOOL_CONFIG_FILE, + token=token, + **hub_kwargs, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + ) + is_tool_config = resolved_config_file is not None + if resolved_config_file is None: + resolved_config_file = cached_file( + repo_id, + CONFIG_NAME, + token=token, + **hub_kwargs, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + ) + if resolved_config_file is None: + raise EnvironmentError( + f"{repo_id} does not appear to provide a valid configuration in `tool_config.json` or `config.json`." + ) + + with open(resolved_config_file, encoding="utf-8") as reader: + config = json.load(reader) + + if not is_tool_config: + if "custom_tool" not in config: + raise EnvironmentError( + f"{repo_id} does not provide a mapping to custom tools in its configuration `config.json`." + ) + custom_tool = config["custom_tool"] + else: + custom_tool = config + + tool_class = custom_tool["tool_class"] + tool_class = get_class_from_dynamic_module(tool_class, repo_id, token=token, **hub_kwargs) + + if len(tool_class.name) == 0: + tool_class.name = custom_tool["name"] + if tool_class.name != custom_tool["name"]: + logger.warning( + f"{tool_class.__name__} implements a different name in its configuration and class. Using the tool " + "configuration name." + ) + tool_class.name = custom_tool["name"] + + if len(tool_class.description) == 0: + tool_class.description = custom_tool["description"] + if tool_class.description != custom_tool["description"]: + logger.warning( + f"{tool_class.__name__} implements a different description in its configuration and class. Using the " + "tool configuration description." + ) + tool_class.description = custom_tool["description"] + + if remote: + return RemoteTool(model_repo_id, token=token, tool_class=tool_class) + return tool_class(model_repo_id, token=token, **kwargs) + + def push_to_hub( + self, + repo_id: str, + commit_message: str = "Upload tool", + private: Optional[bool] = None, + token: Optional[Union[bool, str]] = None, + create_pr: bool = False, + ) -> str: + """ + Upload the tool to the Hub. + + Parameters: + repo_id (`str`): + The name of the repository you want to push your tool to. It should contain your organization name when + pushing to a given organization. + commit_message (`str`, *optional*, defaults to `"Upload tool"`): + Message to commit while pushing. + private (`bool`, *optional*): + Whether or not the repository created should be private. + token (`bool` or `str`, *optional*): + The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + create_pr (`bool`, *optional*, defaults to `False`): + Whether or not to create a PR with the uploaded files or directly commit. + """ + repo_url = create_repo( + repo_id=repo_id, token=token, private=private, exist_ok=True, repo_type="space", space_sdk="gradio" + ) + repo_id = repo_url.repo_id + metadata_update(repo_id, {"tags": ["tool"]}, repo_type="space") + + with tempfile.TemporaryDirectory() as work_dir: + # Save all files. 
+            self.save(work_dir)
+            logger.info(f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}")
+            return upload_folder(
+                repo_id=repo_id,
+                commit_message=commit_message,
+                folder_path=work_dir,
+                token=token,
+                create_pr=create_pr,
+                repo_type="space",
+            )
+
+    @staticmethod
+    def from_gradio(gradio_tool):
+        """
+        Creates a [`Tool`] from a gradio tool.
+        """
+
+        class GradioToolWrapper(Tool):
+            def __init__(self, _gradio_tool):
+                super().__init__()
+                self.name = _gradio_tool.name
+                self.description = _gradio_tool.description
+
+        GradioToolWrapper.__call__ = gradio_tool.run
+        return GradioToolWrapper(gradio_tool)
+
+
+class RemoteTool(Tool):
+    """
+    A [`Tool`] that will make requests to an inference endpoint.
+
+    Args:
+        endpoint_url (`str`, *optional*):
+            The url of the endpoint to use.
+        token (`str`, *optional*):
+            The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when
+            running `huggingface-cli login` (stored in `~/.huggingface`).
+        tool_class (`type`, *optional*):
+            The corresponding `tool_class` if this is a remote version of an existing tool. Will help determine when
+            the output should be converted to another type (like images).
+    """
+
+    def __init__(self, endpoint_url=None, token=None, tool_class=None):
+        self.endpoint_url = endpoint_url
+        self.client = EndpointClient(endpoint_url, token=token)
+        self.tool_class = tool_class
+
+    def prepare_inputs(self, *args, **kwargs):
+        """
+        Prepare the inputs received for the HTTP client sending data to the endpoint. Positional arguments will be
+        matched with the signature of the `tool_class` if it was provided at instantiation. Images will be encoded into
+        bytes.
+
+        You can override this method in your custom subclass of [`RemoteTool`].
+        """
+        inputs = kwargs.copy()
+        if len(args) > 0:
+            if self.tool_class is not None:
+                # Match args with the signature
+                if issubclass(self.tool_class, PipelineTool):
+                    call_method = self.tool_class.encode
+                else:
+                    call_method = self.tool_class.__call__
+                signature = inspect.signature(call_method).parameters
+                parameters = [
+                    k
+                    for k, p in signature.items()
+                    if p.kind not in [inspect._ParameterKind.VAR_POSITIONAL, inspect._ParameterKind.VAR_KEYWORD]
+                ]
+                if parameters[0] == "self":
+                    parameters = parameters[1:]
+                if len(args) > len(parameters):
+                    raise ValueError(
+                        f"{self.tool_class} only accepts {len(parameters)} arguments but {len(args)} were given."
+                    )
+                for arg, name in zip(args, parameters):
+                    inputs[name] = arg
+            elif len(args) > 1:
+                raise ValueError("A `RemoteTool` can only accept one positional input.")
+            elif len(args) == 1:
+                if is_pil_image(args[0]):
+                    return {"inputs": self.client.encode_image(args[0])}
+                return {"inputs": args[0]}
+
+        for key, value in inputs.items():
+            if is_pil_image(value):
+                inputs[key] = self.client.encode_image(value)
+
+        return {"inputs": inputs}
+
+    def extract_outputs(self, outputs):
+        """
+        You can override this method in your custom subclass of [`RemoteTool`] to apply some custom post-processing of
+        the outputs of the endpoint.
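+
+        Example (a sketch of a hypothetical subclass; the post-processing shown is purely illustrative):
+
+        ```py
+        class MyRemoteTool(RemoteTool):
+            def extract_outputs(self, outputs):
+                # Strip surrounding whitespace from text outputs before returning them.
+                if isinstance(outputs, str):
+                    return outputs.strip()
+                return outputs
+        ```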
+ """ + return outputs + + def __call__(self, *args, **kwargs): + args, kwargs = handle_agent_inputs(*args, **kwargs) + + output_image = self.tool_class is not None and self.tool_class.outputs == ["image"] + inputs = self.prepare_inputs(*args, **kwargs) + if isinstance(inputs, dict): + outputs = self.client(**inputs, output_image=output_image) + else: + outputs = self.client(inputs, output_image=output_image) + if isinstance(outputs, list) and len(outputs) == 1 and isinstance(outputs[0], list): + outputs = outputs[0] + + outputs = handle_agent_outputs(outputs, self.tool_class.outputs if self.tool_class is not None else None) + + return self.extract_outputs(outputs) + + +class PipelineTool(Tool): + """ + A [`Tool`] tailored towards Transformer models. On top of the class attributes of the base class [`Tool`], you will + need to specify: + + - **model_class** (`type`) -- The class to use to load the model in this tool. + - **default_checkpoint** (`str`) -- The default checkpoint that should be used when the user doesn't specify one. + - **pre_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the + pre-processor + - **post_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the + post-processor (when different from the pre-processor). + + Args: + model (`str` or [`PreTrainedModel`], *optional*): + The name of the checkpoint to use for the model, or the instantiated model. If unset, will default to the + value of the class attribute `default_checkpoint`. + pre_processor (`str` or `Any`, *optional*): + The name of the checkpoint to use for the pre-processor, or the instantiated pre-processor (can be a + tokenizer, an image processor, a feature extractor or a processor). Will default to the value of `model` if + unset. + post_processor (`str` or `Any`, *optional*): + The name of the checkpoint to use for the post-processor, or the instantiated pre-processor (can be a + tokenizer, an image processor, a feature extractor or a processor). Will default to the `pre_processor` if + unset. + device (`int`, `str` or `torch.device`, *optional*): + The device on which to execute the model. Will default to any accelerator available (GPU, MPS etc...), the + CPU otherwise. + device_map (`str` or `dict`, *optional*): + If passed along, will be used to instantiate the model. + model_kwargs (`dict`, *optional*): + Any keyword argument to send to the model instantiation. + token (`str`, *optional*): + The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when + running `huggingface-cli login` (stored in `~/.huggingface`). + hub_kwargs (additional keyword arguments, *optional*): + Any additional keyword argument to send to the methods that will load the data from the Hub. 
+ """ + + pre_processor_class = AutoProcessor + model_class = None + post_processor_class = AutoProcessor + default_checkpoint = None + + def __init__( + self, + model=None, + pre_processor=None, + post_processor=None, + device=None, + device_map=None, + model_kwargs=None, + token=None, + **hub_kwargs, + ): + if not is_torch_available(): + raise ImportError("Please install torch in order to use this tool.") + + if not is_accelerate_available(): + raise ImportError("Please install accelerate in order to use this tool.") + + if model is None: + if self.default_checkpoint is None: + raise ValueError("This tool does not implement a default checkpoint, you need to pass one.") + model = self.default_checkpoint + if pre_processor is None: + pre_processor = model + + self.model = model + self.pre_processor = pre_processor + self.post_processor = post_processor + self.device = device + self.device_map = device_map + self.model_kwargs = {} if model_kwargs is None else model_kwargs + if device_map is not None: + self.model_kwargs["device_map"] = device_map + self.hub_kwargs = hub_kwargs + self.hub_kwargs["token"] = token + + super().__init__() + + def setup(self): + """ + Instantiates the `pre_processor`, `model` and `post_processor` if necessary. + """ + if isinstance(self.pre_processor, str): + self.pre_processor = self.pre_processor_class.from_pretrained(self.pre_processor, **self.hub_kwargs) + + if isinstance(self.model, str): + self.model = self.model_class.from_pretrained(self.model, **self.model_kwargs, **self.hub_kwargs) + + if self.post_processor is None: + self.post_processor = self.pre_processor + elif isinstance(self.post_processor, str): + self.post_processor = self.post_processor_class.from_pretrained(self.post_processor, **self.hub_kwargs) + + if self.device is None: + if self.device_map is not None: + self.device = list(self.model.hf_device_map.values())[0] + else: + self.device = get_default_device() + + if self.device_map is None: + self.model.to(self.device) + + super().setup() + + def encode(self, raw_inputs): + """ + Uses the `pre_processor` to prepare the inputs for the `model`. + """ + return self.pre_processor(raw_inputs) + + def forward(self, inputs): + """ + Sends the inputs through the `model`. + """ + with torch.no_grad(): + return self.model(**inputs) + + def decode(self, outputs): + """ + Uses the `post_processor` to decode the model output. + """ + return self.post_processor(outputs) + + def __call__(self, *args, **kwargs): + args, kwargs = handle_agent_inputs(*args, **kwargs) + + if not self.is_initialized: + self.setup() + + encoded_inputs = self.encode(*args, **kwargs) + encoded_inputs = send_to_device(encoded_inputs, self.device) + outputs = self.forward(encoded_inputs) + outputs = send_to_device(outputs, "cpu") + decoded_outputs = self.decode(outputs) + + return handle_agent_outputs(decoded_outputs, self.outputs) + + +def launch_gradio_demo(tool_class: Tool): + """ + Launches a gradio demo for a tool. The corresponding tool class needs to properly implement the class attributes + `inputs` and `outputs`. + + Args: + tool_class (`type`): The class of the tool for which to launch the demo. 
+ """ + try: + import gradio as gr + except ImportError: + raise ImportError("Gradio should be installed in order to launch a gradio demo.") + + tool = tool_class() + + def fn(*args, **kwargs): + return tool(*args, **kwargs) + + gr.Interface( + fn=fn, + inputs=tool_class.inputs, + outputs=tool_class.outputs, + title=tool_class.__name__, + article=tool.description, + ).launch() + + +# TODO: Migrate to Accelerate for this once `PartialState.default_device` makes its way into a release. +def get_default_device(): + logger.warning( + "`get_default_device` is deprecated and will be replaced with `accelerate`'s `PartialState().default_device` " + "in version 4.38 of 🤗 Transformers. " + ) + if not is_torch_available(): + raise ImportError("Please install torch in order to use this tool.") + + if torch.backends.mps.is_available() and torch.backends.mps.is_built(): + return torch.device("mps") + elif torch.cuda.is_available(): + return torch.device("cuda") + else: + return torch.device("cpu") + + +TASK_MAPPING = { + "document-question-answering": "DocumentQuestionAnsweringTool", + "image-captioning": "ImageCaptioningTool", + "image-question-answering": "ImageQuestionAnsweringTool", + "image-segmentation": "ImageSegmentationTool", + "speech-to-text": "SpeechToTextTool", + "summarization": "TextSummarizationTool", + "text-classification": "TextClassificationTool", + "text-question-answering": "TextQuestionAnsweringTool", + "text-to-speech": "TextToSpeechTool", + "translation": "TranslationTool", +} + + +def get_default_endpoints(): + endpoints_file = cached_file("huggingface-tools/default-endpoints", "default_endpoints.json", repo_type="dataset") + with open(endpoints_file, "r", encoding="utf-8") as f: + endpoints = json.load(f) + return endpoints + + +def supports_remote(task_or_repo_id): + endpoints = get_default_endpoints() + return task_or_repo_id in endpoints + + +def load_tool(task_or_repo_id, model_repo_id=None, remote=False, token=None, **kwargs): + """ + Main function to quickly load a tool, be it on the Hub or in the Transformers library. + + Args: + task_or_repo_id (`str`): + The task for which to load the tool or a repo ID of a tool on the Hub. Tasks implemented in Transformers + are: + + - `"document-question-answering"` + - `"image-captioning"` + - `"image-question-answering"` + - `"image-segmentation"` + - `"speech-to-text"` + - `"summarization"` + - `"text-classification"` + - `"text-question-answering"` + - `"text-to-speech"` + - `"translation"` + + model_repo_id (`str`, *optional*): + Use this argument to use a different model than the default one for the tool you selected. + remote (`bool`, *optional*, defaults to `False`): + Whether to use your tool by downloading the model or (if it is available) with an inference endpoint. + token (`str`, *optional*): + The token to identify you on hf.co. If unset, will use the token generated when running `huggingface-cli + login` (stored in `~/.huggingface`). + kwargs (additional keyword arguments, *optional*): + Additional keyword arguments that will be split in two: all arguments relevant to the Hub (such as + `cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the others + will be passed along to its init. 
+ """ + if task_or_repo_id in TASK_MAPPING: + tool_class_name = TASK_MAPPING[task_or_repo_id] + main_module = importlib.import_module("transformers") + tools_module = main_module.tools + tool_class = getattr(tools_module, tool_class_name) + + if remote: + if model_repo_id is None: + endpoints = get_default_endpoints() + if task_or_repo_id not in endpoints: + raise ValueError( + f"Could not infer a default endpoint for {task_or_repo_id}, you need to pass one using the " + "`model_repo_id` argument." + ) + model_repo_id = endpoints[task_or_repo_id] + return RemoteTool(model_repo_id, token=token, tool_class=tool_class) + else: + return tool_class(model_repo_id, token=token, **kwargs) + else: + return Tool.from_hub(task_or_repo_id, model_repo_id=model_repo_id, token=token, remote=remote, **kwargs) + + +def add_description(description): + """ + A decorator that adds a description to a function. + """ + + def inner(func): + func.description = description + func.name = func.__name__ + return func + + return inner + + +## Will move to the Hub +class EndpointClient: + def __init__(self, endpoint_url: str, token: Optional[str] = None): + self.headers = {**build_hf_headers(token=token), "Content-Type": "application/json"} + self.endpoint_url = endpoint_url + + @staticmethod + def encode_image(image): + _bytes = io.BytesIO() + image.save(_bytes, format="PNG") + b64 = base64.b64encode(_bytes.getvalue()) + return b64.decode("utf-8") + + @staticmethod + def decode_image(raw_image): + if not is_vision_available(): + raise ImportError( + "This tool returned an image but Pillow is not installed. Please install it (`pip install Pillow`)." + ) + + from PIL import Image + + b64 = base64.b64decode(raw_image) + _bytes = io.BytesIO(b64) + return Image.open(_bytes) + + def __call__( + self, + inputs: Optional[Union[str, Dict, List[str], List[List[str]]]] = None, + params: Optional[Dict] = None, + data: Optional[bytes] = None, + output_image: bool = False, + ) -> Any: + # Build payload + payload = {} + if inputs: + payload["inputs"] = inputs + if params: + payload["parameters"] = params + + # Make API call + response = get_session().post(self.endpoint_url, headers=self.headers, json=payload, data=data) + + # By default, parse the response for the user. + if output_image: + return self.decode_image(response.content) + else: + return response.json() diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/document_question_answering.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/document_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..7b5e8782bd785f18001a4d7f3e3dac6a840506c5 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/document_question_answering.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import re
+
+from ..models.auto import AutoProcessor
+from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
+from ..utils import is_vision_available
+from .base import PipelineTool
+
+
+if is_vision_available():
+    from PIL import Image
+
+
+class DocumentQuestionAnsweringTool(PipelineTool):
+    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
+    description = (
+        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
+        "should be the document containing the information, as well as a `question` that is the question about the "
+        "document. It returns a text that contains the answer to the question."
+    )
+    name = "document_qa"
+    pre_processor_class = AutoProcessor
+    model_class = VisionEncoderDecoderModel
+
+    inputs = ["image", "text"]
+    outputs = ["text"]
+
+    def __init__(self, *args, **kwargs):
+        if not is_vision_available():
+            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
+
+        super().__init__(*args, **kwargs)
+
+    def encode(self, document: "Image", question: str):
+        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
+        prompt = task_prompt.replace("{user_input}", question)
+        decoder_input_ids = self.pre_processor.tokenizer(
+            prompt, add_special_tokens=False, return_tensors="pt"
+        ).input_ids
+        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
+
+        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
+
+    def forward(self, inputs):
+        return self.model.generate(
+            inputs["pixel_values"].to(self.device),
+            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
+            max_length=self.model.decoder.config.max_position_embeddings,
+            early_stopping=True,
+            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
+            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
+            use_cache=True,
+            num_beams=1,
+            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
+            return_dict_in_generate=True,
+        ).sequences
+
+    def decode(self, outputs):
+        sequence = self.pre_processor.batch_decode(outputs)[0]
+        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
+        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
+        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
+        sequence = self.pre_processor.token2json(sequence)
+
+        return sequence["answer"]
 diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/evaluate_agent.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/evaluate_agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d5cddf1c9d01f6e710d784a96ad24c1e00a7bc1
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/evaluate_agent.py
@@ -0,0 +1,692 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .agents import BASE_PYTHON_TOOLS, clean_code_for_chat, clean_code_for_run
+from .python_interpreter import InterpretorError, evaluate
+
+
+### Fake tools for test
+def classifier(text, labels):
+    return f"This is the classification of {text} along {labels}."
+
+
+def translator(text, src_lang, tgt_lang):
+    return f"This is the translation of {text} from {src_lang} to {tgt_lang}."
+
+
+def speaker(text):
+    return f"This is actually a sound reading {text}."
+
+
+def transcriber(audio):
+    if "sound" not in audio:
+        raise ValueError(f"`audio` ({audio}) is not a sound.")
+    return f"This is the transcribed text from {audio}."
+
+
+def image_generator(prompt):
+    return f"This is actually an image representing {prompt}."
+
+
+def image_captioner(image):
+    if "image" not in image:
+        raise ValueError(f"`image` ({image}) is not an image.")
+    return f"This is a description of {image}."
+
+
+def image_transformer(image, prompt):
+    if "image" not in image:
+        raise ValueError(f"`image` ({image}) is not an image.")
+    return f"This is a transformation of {image} according to {prompt}."
+
+
+def question_answerer(text, question):
+    return f"This is the answer to {question} from {text}."
+
+
+def image_qa(image, question):
+    if "image" not in image:
+        raise ValueError(f"`image` ({image}) is not an image.")
+    return f"This is the answer to {question} from {image}."
+
+
+def text_downloader(url):
+    return f"This is the content of {url}."
+
+
+def summarizer(text):
+    return f"This is a summary of {text}."
+
+
+def video_generator(prompt, seconds=2):
+    return f"A video of {prompt}"
+
+
+def document_qa(image, question):
+    return f"This is the answer to {question} from the document {image}."
+
+
+def image_segmenter(image, prompt):
+    return f"This is the mask of {prompt} in {image}"
+
+
+TEST_TOOLS = {
+    "text_classifier": classifier,
+    "translator": translator,
+    "text_reader": speaker,
+    "summarizer": summarizer,
+    "transcriber": transcriber,
+    "image_generator": image_generator,
+    "image_captioner": image_captioner,
+    "image_transformer": image_transformer,
+    "text_qa": question_answerer,
+    "text_downloader": text_downloader,
+    "image_qa": image_qa,
+    "video_generator": video_generator,
+    "document_qa": document_qa,
+    "image_segmenter": image_segmenter,
+}
+
+
+class Problem:
+    """
+    A class grouping all the information needed to solve a problem on which we will evaluate agents.
+
+    Args:
+        task (`str` or `list[str]`):
+            One or several descriptions of the task to perform. If a list, it should contain variations on the
+            phrasing, but for the same task.
+        inputs (`list[str]` or `dict[str, str]`):
+            The inputs that will be fed to the tools. For this testing environment, only strings are accepted as
+            values. Pass along a dictionary when you want to specify the values of each input, or just the list of
+            inputs expected (the value used will be `<<input_name>>` in this case).
+        answer (`str` or `list[str]`):
+            The theoretical answer (or list of possible valid answers) to the problem, as code.
+    """
+
+    def __init__(self, task, inputs, answer):
+        self.task = task
+        self.inputs = inputs
+        self.answer = answer
+
+
+### The list of problems the agent will be evaluated on.
+EVALUATION_TASKS = [
+    Problem(
+        task=[
+            "Is the following `text` (in Spanish) positive or negative?",
+            "Is the text in the variable `text` (in Spanish) positive or negative?",
+            "Translate the following `text` from Spanish to English then tell me if it's positive or negative.",
+        ],
+        inputs=["text"],
+        answer="""text_classifier(translator(text, src_lang="Spanish", tgt_lang="English"), labels=["positive", "negative"])""",
+    ),
+    Problem(
+        task=[
+            "Tell me out loud what the `image` contains.",
+            "Describe the following `image` out loud.",
+            "Find what is in the picture stored in `image` then read it out loud.",
+        ],
+        inputs=["image"],
+        answer=[
+            "text_reader(image_captioner(image))",
+            "text_reader(image_qa(image, question='What is in the image?'))",
+        ],
+    ),
+    Problem(
+        task=[
+            "Generate an image from the text given in `text_input`. Then transform it according to the text in `prompt`.",
+            "Use the following `text_input` to generate an image, then transform it by using the text in `prompt`.",
+        ],
+        inputs=["text_input", "prompt"],
+        answer="image_transformer(image_generator(text_input), prompt)",
+    ),
+    Problem(
+        task=[
+            "Download the content of `url`, summarize it then generate an image from its content.",
+            "Use a summary of the web page at `url` to generate an image.",
+            "Summarize the content of the web page at `url`, and use the result to generate an image.",
+        ],
+        inputs=["url"],
+        answer="image_generator(summarizer(text_downloader(url)))",
+    ),
+    Problem(
+        task=[
+            "Transform the following `image` using the prompt in `text`. The prompt is in Spanish.",
+            "Use the text prompt in `text` (in Spanish) to transform the following `image`.",
+            "Translate the `text` from Spanish to English then use it to transform the picture in `image`.",
+        ],
+        inputs=["text", "image"],
+        answer="image_transformer(image, translator(text, src_lang='Spanish', tgt_lang='English'))",
+    ),
+    Problem(
+        task=[
+            "Download the content of `url`, summarize it then read it out loud to me.",
+            "Read me a summary of the web page at `url`.",
+        ],
+        inputs=["url"],
+        answer="text_reader(summarizer(text_downloader(url)))",
+    ),
+    Problem(
+        task=[
+            "Generate an image from the text given in `text_input`.",
+        ],
+        inputs=["text_input"],
+        answer="image_generator(text_input)",
+    ),
+    Problem(
+        task=[
+            "Replace the beaver in the `image` by the `prompt`.",
+            "Transform the `image` so that it contains the `prompt`.",
+            "Use `prompt` to transform this `image`.",
+        ],
+        inputs=["image", "prompt"],
+        answer="image_transformer(image, prompt)",
+    ),
+    Problem(
+        task=[
+            "Provide me the summary of the `text`, then read it to me before transcribing it and translating it in French.",
+            "Summarize `text`, read it out loud then transcribe the audio and translate it in French.",
+            "Read me a summary of the `text` out loud. Transcribe this and translate it in French.",
+        ],
+        inputs=["text"],
+        answer="translator(transcriber(text_reader(summarizer(text))), src_lang='English', tgt_lang='French')",
+    ),
+    Problem(
+        task=["Generate a video of the `prompt`", "Animate a `prompt`", "Make me a short video using `prompt`."],
+        inputs={"prompt": "A lobster swimming"},
+        answer="video_generator('A lobster swimming')",
+    ),
+    Problem(
+        task=[
+            "Download the following file `url`, summarize it in a few words and generate a video from it.",
+            "Fetch the file at this `url`, summarize it, and create an animation out of it.",
+        ],
+        inputs=["url"],
+        answer="video_generator(summarizer(text_downloader(url)))",
+    ),
+]
+
+
+EVALUATION_CHATS = [
+    [
+        Problem(
+            task=[
+                "Translate the following `text` from Spanish to English.",
+                "Translate the following `text` from Spanish to English.",
+            ],
+            inputs=["text"],
+            answer="translated_text=translator(text, src_lang='Spanish', tgt_lang='English')",
+        ),
+        Problem(
+            task=[
+                "Is it positive or negative?",
+                "Tell me if it's positive or negative.",
+            ],
+            inputs=[],
+            answer="text_classifier(translated_text, labels=['positive', 'negative'])",
+        ),
+    ],
+    [
+        Problem(
+            task=[
+                "What does this `image` contain?",
+                "Describe the following `image`.",
+                "Find what is in the picture stored in `image`",
+            ],
+            inputs=["image"],
+            answer=[
+                "description=image_captioner(image)",
+                "description=image_qa(image, question='What is in the image?')",
+            ],
+        ),
+        Problem(
+            task=["Now, read the description out loud.", "Great! Can you read it out loud?", "Read it out loud."],
+            inputs=[],
+            answer=["audio=text_reader(description)", "audio=text_reader(description)"],
+        ),
+    ],
+    [
+        Problem(
+            task=[
+                "Generate an image from the text given in `text_input`.",
+                "Use the following `text_input` to generate an image",
+            ],
+            inputs=["text_input"],
+            answer="image = image_generator(text_input)",
+        ),
+        Problem(
+            task=[
+                "Transform it according to the text in `prompt`.",
+                "Transform it by using the text in `prompt`.",
+            ],
+            inputs=["prompt"],
+            answer="image_transformer(image, prompt)",
+        ),
+    ],
+    [
+        Problem(
+            task=[
+                "Download the content of `url` and summarize it.",
+                "Summarize the content of the web page at `url`.",
+            ],
+            inputs=["url"],
+            answer="summary = summarizer(text_downloader(url))",
+        ),
+        Problem(
+            task=[
+                "Generate an image from its content.",
+                "Use the previous result to generate an image.",
+            ],
+            inputs=[],
+            answer="image_generator(summary)",
+        ),
+    ],
+    [
+        Problem(
+            task=[
+                "Translate this Spanish `text` to English.",
+                "Translate the `text` from Spanish to English.",
+            ],
+            inputs=["text"],
+            answer="translated_text = translator(text, src_lang='Spanish', tgt_lang='English')",
+        ),
+        Problem(
+            task=[
+                "Transform the following `image` using the translated `text`.",
+                "Use the previous result to transform the following `image`.",
+            ],
+            inputs=["image"],
+            answer="image_transformer(image, translated_text)",
+        ),
+    ],
+    [
+        Problem(
+            task=["Download the content of `url`.", "Get me the text on the web page `url`."],
+            inputs=["url"],
+            answer="text = text_downloader(url)",
+        ),
+        Problem(
+            task=["Summarize this text.", "Summarize this text."],
+            inputs=[],
+            answer="summary = summarizer(text)",
+        ),
+        Problem(
+            task=["Read it out loud to me.", "Read me the previous result."],
+            inputs=[],
+            answer="text_reader(summary)",
+        ),
+    ],
+    [
+        Problem(
+            task=[
+                "Generate an image from the text given in `text_input`.",
+            ],
+            inputs=["text_input"],
+            answer="image_generator(text_input)",
+        ),
+    ],
+    [
+        Problem(
+            task=[
+                "Replace the beaver in the `image` by the `prompt`.",
+                "Transform the `image` so that it contains the `prompt`.",
+                "Use `prompt` to transform this `image`.",
+            ],
+            inputs=["image", "prompt"],
+            answer="image_transformer(image, prompt)",
+        ),
+    ],
+    [
+        Problem(
+            task=["Provide me the summary of the `text`.", "Summarize `text`."],
+            inputs=["text"],
+            answer="summary = summarizer(text)",
+        ),
+        Problem(
+            task=["Read this summary to me.", "Read it out loud."],
+            inputs=[],
+            answer="audio = text_reader(summarizer(text))",
+        ),
+        Problem(
+ task=["Transcribing the previous result back in text.", "Transcribe the audio."], + inputs=[], + answer="text = transcriber(audio)", + ), + Problem( + task=["Translating the last result in French.", "Translate this in French."], + inputs=[], + answer="translator(text, src_lang='English', tgt_lang='French')", + ), + ], + [ + Problem( + task=["Generate a video of the `prompt`", "Animate a `prompt`", "Make me a short video using `prompt`."], + inputs={"prompt": "A lobster swimming"}, + answer="video_generator('A lobster swimming')", + ), + ], + [ + Problem( + task=[ + "Download the content of `url` and summarize it.", + "Summarize the content of the web page at `url`.", + ], + inputs=["url"], + answer="summary = summarizer(text_downloader(url))", + ), + Problem( + task=["generate a video from it.", "Create an animation from the last result."], + inputs=[], + answer="video_generator(summary)", + ), + ], +] + + +def get_theoretical_tools(agent_answer, theoretical_answer, code_answer): + if not isinstance(theoretical_answer, list): + return {name for name in TEST_TOOLS if name in code_answer} + + if isinstance(agent_answer, dict): + for one_answer, one_code in zip(theoretical_answer, code_answer): + if one_answer in agent_answer.values(): + return {name for name in TEST_TOOLS if name in one_code} + + for one_answer, one_code in zip(theoretical_answer, code_answer): + if agent_answer == one_answer: + return {name for name in TEST_TOOLS if name in one_code} + + return {name for name in TEST_TOOLS if name in code_answer[0]} + + +def evaluate_code(code, inputs=None, state=None, verbose=False, return_interpretor_error=False): + tools = BASE_PYTHON_TOOLS.copy() + for name, tool in TEST_TOOLS.items(): + if name not in code: + continue + tools[name] = tool + + if isinstance(inputs, dict): + inputs = inputs.copy() + elif inputs is not None: + inputs = {inp: f"<<{inp}>>" for inp in inputs} + + if state is not None: + state.update(inputs) + else: + state = inputs + + try: + return evaluate(code, tools, state) + except InterpretorError as e: + return str(e) + except Exception as e: + if verbose: + print(e) + return None + + +def score_code(agent_answer, theoretical_answer, verbose: bool = False): + if verbose: + print(agent_answer, theoretical_answer) + theoretical_answer = theoretical_answer if isinstance(theoretical_answer, list) else [theoretical_answer] + + if agent_answer in theoretical_answer: + if verbose: + print("Perfect!") + return 1 + elif isinstance(agent_answer, dict) and any(v in theoretical_answer for v in agent_answer.values()): + if verbose: + print("Almsot perfect, result in state!") + return 0.75 + else: + if verbose: + print("Result is not the right one but code executed.") + return 0.3 + + +def evaluate_one_result(explanation, code, agent_answer, theoretical_answer, answer, verbose=False): + tools_in_explanation = {name for name in TEST_TOOLS if f"`{name}`" in explanation} + theoretical_tools = get_theoretical_tools(agent_answer, theoretical_answer, answer) + if tools_in_explanation == theoretical_tools: + tool_selection_score = 1.0 + tool_selection_errors = None + else: + missing_tools = len(theoretical_tools - tools_in_explanation) + unexpected_tools = len(tools_in_explanation - theoretical_tools) + tool_selection_score = max(0, 1.0 - 0.25 * missing_tools - 0.25 * unexpected_tools) + + tool_selection_errors = { + "selected_tools": tools_in_explanation, + "theoretical_tools": theoretical_tools, + } + + tools_in_code = {name for name in TEST_TOOLS if name in code} + if tools_in_code 
+        tool_used_score = 1.0
+        tool_used_errors = None
+    else:
+        missing_tools = len(theoretical_tools - tools_in_code)
+        unexpected_tools = len(tools_in_code - theoretical_tools)
+        tool_used_score = max(0, 1.0 - 0.25 * missing_tools - 0.25 * unexpected_tools)
+
+        tool_used_errors = {
+            "selected_tools": tools_in_explanation,
+            "theoretical_tools": theoretical_tools,
+        }
+
+    score = score_code(agent_answer, theoretical_answer, verbose=verbose)
+    if score < 1.0:
+        code_errors = {
+            "code_produced": code,
+            "evaluation": agent_answer,
+            "theoretical_answer": theoretical_answer,
+        }
+    else:
+        code_errors = None
+
+    return (tool_selection_score, tool_used_score, score), (tool_selection_errors, tool_used_errors, code_errors)
+
+
+def evaluate_agent(agent, batch_size=8, verbose=False, return_errors=False):
+    """
+    Evaluates a new agent on all `EVALUATION_TASKS`.
+
+    Example:
+
+    ```py
+    agent = OpenAiAgent(model="text-davinci-003", api_key=your_api_key)
+    scores = evaluate_agent(agent)
+    print(scores)
+    ```
+    """
+    # Sanity check
+    agent_tools = set(agent.toolbox.keys())
+    if agent_tools != set(TEST_TOOLS):
+        missing_tools = set(TEST_TOOLS) - agent_tools
+        unexpected_tools = agent_tools - set(TEST_TOOLS)
+        raise ValueError(
+            f"Fix the test tools in the evaluate_agent module. Tools missing: {missing_tools}. Extra tools: {unexpected_tools}."
+        )
+
+    eval_tasks = []
+    eval_idx = []
+    for idx, pb in enumerate(EVALUATION_TASKS):
+        if isinstance(pb.task, list):
+            eval_tasks.extend(pb.task)
+            eval_idx.extend([idx] * len(pb.task))
+        else:
+            eval_tasks.append(pb.task)
+            eval_idx.append(idx)
+
+    tool_selection_score = 0
+    tool_used_score = 0
+    code_score = 0
+
+    if return_errors:
+        tool_selection_errors = {}
+        tool_used_errors = {}
+        code_errors = {}
+
+    for start_idx in range(0, len(eval_tasks), batch_size):
+        end_idx = min(start_idx + batch_size, len(eval_tasks))
+        batch_tasks = eval_tasks[start_idx:end_idx]
+
+        prompts = [agent.format_prompt(task) for task in batch_tasks]
+        results = agent.generate_many(prompts, stop=["Task:"])
+
+        for idx, result in enumerate(results):
+            problem = EVALUATION_TASKS[eval_idx[start_idx + idx]]
+            if verbose:
+                print(f"====Task {start_idx + idx}====\n{batch_tasks[idx]}\n")
+            explanation, code = clean_code_for_run(result)
+
+            # Evaluate agent answer and code answer
+            agent_answer = evaluate_code(code, problem.inputs, verbose=verbose)
+            if isinstance(problem.answer, list):
+                theoretical_answer = [evaluate_code(answer, problem.inputs) for answer in problem.answer]
+            else:
+                theoretical_answer = evaluate_code(problem.answer, problem.inputs)
+
+            scores, errors = evaluate_one_result(
+                explanation, code, agent_answer, theoretical_answer, problem.answer, verbose=verbose
+            )
+
+            tool_selection_score += scores[0]
+            tool_used_score += scores[1]
+            code_score += scores[2]
+
+            if return_errors:
+                if errors[0] is not None:
+                    tool_selection_errors[batch_tasks[idx]] = errors[0]
+                if errors[1] is not None:
+                    tool_used_errors[batch_tasks[idx]] = errors[1]
+                if errors[2] is not None:
+                    code_errors[batch_tasks[idx]] = errors[2]
+
+    scores = {
+        "tool selection score": 100 * (tool_selection_score / len(eval_tasks)),
+        "tool used score": 100 * (tool_used_score / len(eval_tasks)),
+        "code score": 100 * (code_score / len(eval_tasks)),
+    }
+
+    if return_errors:
+        return scores, tool_selection_errors, tool_used_errors, code_errors
+    else:
+        return scores
+
+
+def evaluate_chat_agent(agent, verbose=False, return_errors=False):
+    """
+    Evaluates a new agent on all `EVALUATION_CHATS`.
+
+    Example:
+
+    ```py
+    agent = OpenAiAgent(model="text-davinci-003", api_key=your_api_key)
+    scores = evaluate_chat_agent(agent)
+    print(scores)
+    ```
+    """
+    # Sanity check
+    agent_tools = set(agent.toolbox.keys())
+    if agent_tools != set(TEST_TOOLS):
+        missing_tools = set(TEST_TOOLS) - agent_tools
+        unexpected_tools = agent_tools - set(TEST_TOOLS)
+        raise ValueError(
+            f"Fix the test tools in the evaluate_agent module. Tools missing: {missing_tools}. Extra tools: {unexpected_tools}."
+        )
+
+    tool_selection_score = 0
+    tool_used_score = 0
+    code_score = 0
+    total_steps = 0
+
+    if return_errors:
+        tool_selection_errors = {}
+        tool_used_errors = {}
+        code_errors = {}
+
+    for chat_problem in EVALUATION_CHATS:
+        if isinstance(chat_problem[0].task, str):
+            resolved_problems = [chat_problem]
+        else:
+            resolved_problems = [
+                [Problem(task=pb.task[i], inputs=pb.inputs, answer=pb.answer) for pb in chat_problem]
+                for i in range(len(chat_problem[0].task))
+            ]
+        for problem in resolved_problems:
+            agent.prepare_for_new_chat()
+            agent_state = {}
+            theoretical_state = (
+                [{} for _ in range(len(problem[0].answer))] if isinstance(problem[0].answer, list) else {}
+            )
+
+            for step, step_problem in enumerate(problem):
+                if verbose:
+                    print(step_problem.task)
+                total_steps += 1
+                prompt = agent.format_prompt(step_problem.task, chat_mode=True)
+                result = agent.generate_one(prompt, stop=["Human:", "====="])
+                agent.chat_history = prompt + result + "\n"
+
+                explanation, code = clean_code_for_chat(result)
+
+                if verbose:
+                    print(f"==Explanation from the agent==\n{explanation}")
+                    print(f"\n==Code generated by the agent==\n{code}")
+
+                # Evaluate agent answer and code answer
+                agent_answer = evaluate_code(code, step_problem.inputs, state=agent_state, verbose=verbose)
+
+                answer = step_problem.answer
+                if isinstance(answer, list):
+                    theoretical_answer = [
+                        evaluate_code(a, step_problem.inputs, state=state)
+                        for a, state in zip(answer, theoretical_state)
+                    ]
+                else:
+                    theoretical_answer = evaluate_code(answer, step_problem.inputs, state=theoretical_state)
+
+                scores, errors = evaluate_one_result(
+                    explanation, code, agent_answer, theoretical_answer, answer, verbose=verbose
+                )
+
+                tool_selection_score += scores[0]
+                tool_used_score += scores[1]
+                code_score += scores[2]
+
+                if return_errors:
+                    if errors[0] is not None:
+                        tool_selection_errors[step_problem.task] = errors[0]
+                    if errors[1] is not None:
+                        tool_used_errors[step_problem.task] = errors[1]
+                    if errors[2] is not None:
+                        code_errors[step_problem.task] = errors[2]
+
+    scores = {
+        "tool selection score": 100 * (tool_selection_score / total_steps),
+        "tool used score": 100 * (tool_used_score / total_steps),
+        "code score": 100 * (code_score / total_steps),
+    }
+
+    if return_errors:
+        return scores, tool_selection_errors, tool_used_errors, code_errors
+    else:
+        return scores
 diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/image_question_answering.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/image_question_answering.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9d9ef82b514778a363c9cefea301122860382f2
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/image_question_answering.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +import torch + +from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor +from ..utils import requires_backends +from .base import PipelineTool + + +if TYPE_CHECKING: + from PIL import Image + + +class ImageQuestionAnsweringTool(PipelineTool): + default_checkpoint = "dandelin/vilt-b32-finetuned-vqa" + description = ( + "This is a tool that answers a question about an image. It takes an input named `image` which should be the " + "image containing the information, as well as a `question` which should be the question in English. It " + "returns a text that is the answer to the question." + ) + name = "image_qa" + pre_processor_class = AutoProcessor + model_class = AutoModelForVisualQuestionAnswering + + inputs = ["image", "text"] + outputs = ["text"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + super().__init__(*args, **kwargs) + + def encode(self, image: "Image", question: str): + return self.pre_processor(image, question, return_tensors="pt") + + def forward(self, inputs): + with torch.no_grad(): + return self.model(**inputs).logits + + def decode(self, outputs): + idx = outputs.argmax(-1).item() + return self.model.config.id2label[idx] diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/image_segmentation.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/image_segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2615d8bfd8590fc62ba3e31db582cc43d8eec6 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/image_segmentation.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import torch + +from ..models.clipseg import CLIPSegForImageSegmentation +from ..utils import is_vision_available, requires_backends +from .base import PipelineTool + + +if is_vision_available(): + from PIL import Image + + +class ImageSegmentationTool(PipelineTool): + description = ( + "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. " + "It takes two arguments named `image` which should be the original image, and `label` which should be a text " + "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
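A hedged usage sketch for the ImageQuestionAnsweringTool defined above; the import path mirrors the docstring convention used by the sibling tools, the image path is hypothetical, and the first call downloads the default ViLT checkpoint:

```py
from PIL import Image

from transformers.tools import ImageQuestionAnsweringTool

image_qa = ImageQuestionAnsweringTool()
image = Image.open("photo.jpg")  # hypothetical local image
# __call__ chains encode -> forward -> decode and returns a plain string
print(image_qa(image, question="How many dogs are in the picture?"))
```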
+ ) + default_checkpoint = "CIDAS/clipseg-rd64-refined" + name = "image_segmenter" + model_class = CLIPSegForImageSegmentation + + inputs = ["image", "text"] + outputs = ["image"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + super().__init__(*args, **kwargs) + + def encode(self, image: "Image", label: str): + return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt") + + def forward(self, inputs): + with torch.no_grad(): + logits = self.model(**inputs).logits + return logits + + def decode(self, outputs): + array = outputs.cpu().detach().numpy() + array[array <= 0] = 0 + array[array > 0] = 1 + return Image.fromarray((array * 255).astype(np.uint8)) diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/python_interpreter.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/python_interpreter.py new file mode 100644 index 0000000000000000000000000000000000000000..960be1a2a2654918c0cc9820745cefde20e74e9a --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/python_interpreter.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import ast +import difflib +from collections.abc import Mapping +from typing import Any, Callable, Dict + + +class InterpretorError(ValueError): + """ + An error raised when the interpreter cannot evaluate a Python expression, due to a syntax error or unsupported + operations. + """ + + pass + + +def evaluate(code: str, tools: Dict[str, Callable], state=None, chat_mode=False): + """ + Evaluate a Python expression using the content of the variables stored in a state and only evaluating a given set + of functions. + + This function will recurse through the nodes of the tree provided. + + Args: + code (`str`): + The code to evaluate. + tools (`Dict[str, Callable]`): + The functions that may be called during the evaluation. Any call to another function will fail with an + `InterpretorError`. + state (`Dict[str, Any]`): + A dictionary mapping variable names to values. The `state` should contain the initial inputs but will be + updated by this function to contain all variables as they are evaluated. + chat_mode (`bool`, *optional*, defaults to `False`): + Whether or not the function is called from `Agent.chat`. + """ + try: + expression = ast.parse(code) + except SyntaxError as e: + print("The code generated by the agent is not valid.\n", e) + return + if state is None: + state = {} + result = None + for idx, node in enumerate(expression.body): + try: + line_result = evaluate_ast(node, state, tools) + except InterpretorError as e: + msg = f"Evaluation of the code stopped at line {idx} before the end because of the following error" + if chat_mode: + msg += ( + f". 
Copy paste the following error message and send it back to the agent:\nI get an error: '{e}'" + ) + else: + msg += f":\n{e}" + print(msg) + break + if line_result is not None: + result = line_result + + return result + + +def evaluate_ast(expression: ast.AST, state: Dict[str, Any], tools: Dict[str, Callable]): + """ + Evaluate an abstract syntax tree using the content of the variables stored in a state and only evaluating a given + set of functions. + + This function will recurse through the nodes of the tree provided. + + Args: + expression (`ast.AST`): + The code to evaluate, as an abstract syntax tree. + state (`Dict[str, Any]`): + A dictionary mapping variable names to values. The `state` is updated as needed when the evaluation + encounters assignments. + tools (`Dict[str, Callable]`): + The functions that may be called during the evaluation. Any call to another function will fail with an + `InterpretorError`. + """ + if isinstance(expression, ast.Assign): + # Assignment -> we evaluate the assignment, which should update the state + # We return the variable assigned as it may be used to determine the final result. + return evaluate_assign(expression, state, tools) + elif isinstance(expression, ast.Call): + # Function call -> we return the value of the function call + return evaluate_call(expression, state, tools) + elif isinstance(expression, ast.Constant): + # Constant -> just return the value + return expression.value + elif isinstance(expression, ast.Dict): + # Dict -> evaluate all keys and values + keys = [evaluate_ast(k, state, tools) for k in expression.keys] + values = [evaluate_ast(v, state, tools) for v in expression.values] + return dict(zip(keys, values)) + elif isinstance(expression, ast.Expr): + # Expression -> evaluate the content + return evaluate_ast(expression.value, state, tools) + elif isinstance(expression, ast.For): + # For loop -> execute the loop + return evaluate_for(expression, state, tools) + elif isinstance(expression, ast.FormattedValue): + # Formatted value (part of f-string) -> evaluate the content and return + return evaluate_ast(expression.value, state, tools) + elif isinstance(expression, ast.If): + # If -> execute the right branch + return evaluate_if(expression, state, tools) + elif hasattr(ast, "Index") and isinstance(expression, ast.Index): + return evaluate_ast(expression.value, state, tools) + elif isinstance(expression, ast.JoinedStr): + return "".join([str(evaluate_ast(v, state, tools)) for v in expression.values]) + elif isinstance(expression, ast.List): + # List -> evaluate all elements + return [evaluate_ast(elt, state, tools) for elt in expression.elts] + elif isinstance(expression, ast.Name): + # Name -> pick up the value in the state + return evaluate_name(expression, state, tools) + elif isinstance(expression, ast.Subscript): + # Subscript -> return the value of the indexing + return evaluate_subscript(expression, state, tools) + else: + # For now we refuse anything else. Let's add things as we need them.
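+ # For instance, ast.BinOp is not among the handled node types, so evaluating + # arithmetic directly, e.g. evaluate_ast(ast.parse("1 + 2").body[0].value, {}, {}), + # falls through to the raise below with "BinOp is not supported."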
+ raise InterpretorError(f"{expression.__class__.__name__} is not supported.") + + +def evaluate_assign(assign, state, tools): + var_names = assign.targets + result = evaluate_ast(assign.value, state, tools) + + if len(var_names) == 1: + state[var_names[0].id] = result + else: + if len(result) != len(var_names): + raise InterpretorError(f"Expected {len(var_names)} values but got {len(result)}.") + for var_name, r in zip(var_names, result): + state[var_name.id] = r + return result + + +def evaluate_call(call, state, tools): + if not isinstance(call.func, ast.Name): + raise InterpretorError( + f"It is not permitted to evaluate functions other than the provided tools (tried to execute {call.func} of " + f"type {type(call.func)})." + ) + func_name = call.func.id + if func_name not in tools: + raise InterpretorError( + f"It is not permitted to evaluate functions other than the provided tools (tried to execute {call.func.id})." + ) + + func = tools[func_name] + # TODO: deal with args + args = [evaluate_ast(arg, state, tools) for arg in call.args] + kwargs = {keyword.arg: evaluate_ast(keyword.value, state, tools) for keyword in call.keywords} + return func(*args, **kwargs) + + +def evaluate_subscript(subscript, state, tools): + index = evaluate_ast(subscript.slice, state, tools) + value = evaluate_ast(subscript.value, state, tools) + if isinstance(value, (list, tuple)): + return value[int(index)] + if index in value: + return value[index] + if isinstance(index, str) and isinstance(value, Mapping): + close_matches = difflib.get_close_matches(index, list(value.keys())) + if len(close_matches) > 0: + return value[close_matches[0]] + + raise InterpretorError(f"Could not index {value} with '{index}'.") + + +def evaluate_name(name, state, tools): + if name.id in state: + return state[name.id] + close_matches = difflib.get_close_matches(name.id, list(state.keys())) + if len(close_matches) > 0: + return state[close_matches[0]] + raise InterpretorError(f"The variable `{name.id}` is not defined.") + + +def evaluate_condition(condition, state, tools): + if len(condition.ops) > 1: + raise InterpretorError("Cannot evaluate conditions with multiple operators") + + left = evaluate_ast(condition.left, state, tools) + comparator = condition.ops[0] + right = evaluate_ast(condition.comparators[0], state, tools) + + if isinstance(comparator, ast.Eq): + return left == right + elif isinstance(comparator, ast.NotEq): + return left != right + elif isinstance(comparator, ast.Lt): + return left < right + elif isinstance(comparator, ast.LtE): + return left <= right + elif isinstance(comparator, ast.Gt): + return left > right + elif isinstance(comparator, ast.GtE): + return left >= right + elif isinstance(comparator, ast.Is): + return left is right + elif isinstance(comparator, ast.IsNot): + return left is not right + elif isinstance(comparator, ast.In): + return left in right + elif isinstance(comparator, ast.NotIn): + return left not in right + else: + raise InterpretorError(f"Operator not supported: {comparator}") + + +def evaluate_if(if_statement, state, tools): + result = None + if evaluate_condition(if_statement.test, state, tools): + for line in if_statement.body: + line_result = evaluate_ast(line, state, tools) + if line_result is not None: + result = line_result + else: + for line in if_statement.orelse: + line_result = evaluate_ast(line, state, tools) + if line_result is not None: + result = line_result + return result + + +def evaluate_for(for_loop, state, tools): + result = None + iterator = 
evaluate_ast(for_loop.iter, state, tools) + for counter in iterator: + state[for_loop.target.id] = counter + for expression in for_loop.body: + line_result = evaluate_ast(expression, state, tools) + if line_result is not None: + result = line_result + return result diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/speech_to_text.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/speech_to_text.py new file mode 100644 index 0000000000000000000000000000000000000000..d3b8fd29ee1ad0809cf8b003df50a470e609400f --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/speech_to_text.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor +from .base import PipelineTool + + +class SpeechToTextTool(PipelineTool): + default_checkpoint = "openai/whisper-base" + description = ( + "This is a tool that transcribes audio into text. It takes an input named `audio` and returns the " + "transcribed text." + ) + name = "transcriber" + pre_processor_class = WhisperProcessor + model_class = WhisperForConditionalGeneration + + inputs = ["audio"] + outputs = ["text"] + + def encode(self, audio): + return self.pre_processor(audio, return_tensors="pt").input_features + + def forward(self, inputs): + return self.model.generate(inputs=inputs) + + def decode(self, outputs): + return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0] diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_classification.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..f04cdc05b6ac67cd285a1011d83a7bb2854adfe1 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_classification.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
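To make the restricted interpreter's contract concrete, here is a small sketch of `evaluate` in action; the toy tool is invented for illustration and stands in for a real agent tool:

```py
from transformers.tools.python_interpreter import evaluate

# A toy "tool": the only callable the restricted interpreter may invoke
tools = {"shout": lambda text: text.upper()}

state = {"text": "hello"}
result = evaluate("loud = shout(text=text)\nloud", tools, state=state)

print(result)         # 'HELLO' -- value of the last evaluated statement
print(state["loud"])  # 'HELLO' -- assignments update the state in place
```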
+import torch + +from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer +from .base import PipelineTool + + +class TextClassificationTool(PipelineTool): + """ + Example: + + ```py + from transformers.tools import TextClassificationTool + + classifier = TextClassificationTool() + classifier("This is a super nice API!", labels=["positive", "negative"]) + ``` + """ + + default_checkpoint = "facebook/bart-large-mnli" + description = ( + "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " + "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " + "It returns the most likely label in the list of provided `labels` for the input text." + ) + name = "text_classifier" + pre_processor_class = AutoTokenizer + model_class = AutoModelForSequenceClassification + + inputs = ["text", ["text"]] + outputs = ["text"] + + def setup(self): + super().setup() + config = self.model.config + self.entailment_id = -1 + for idx, label in config.id2label.items(): + if label.lower().startswith("entail"): + self.entailment_id = int(idx) + if self.entailment_id == -1: + raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.") + + def encode(self, text, labels): + self._labels = labels + return self.pre_processor( + [text] * len(labels), + [f"This example is {label}" for label in labels], + return_tensors="pt", + padding="max_length", + ) + + def decode(self, outputs): + logits = outputs.logits + label_id = torch.argmax(logits[:, self.entailment_id]).item() + return self._labels[label_id] diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_question_answering.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..2a7c2fc09a63499871bc729825b812c79348c762 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_question_answering.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer +from .base import PipelineTool + + +QA_PROMPT = """Here is a text containing a lot of information: '''{text}'''. + +Can you answer this question about the text: '{question}'""" + + +class TextQuestionAnsweringTool(PipelineTool): + default_checkpoint = "google/flan-t5-base" + description = ( + "This is a tool that answers questions related to a text. It takes two arguments named `text`, which is the " + "text in which to find the answer, and `question`, which is the question, and returns the answer to the question." 
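The zero-shot recipe implemented by `encode`/`decode` above can be spelled out without the tool wrapper: each candidate label becomes an NLI hypothesis, and the label whose pair receives the highest entailment logit wins. A sketch, assuming the `facebook/bart-large-mnli` checkpoint (whose `id2label` maps index 2 to entailment):

```py
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")

text = "This is a super nice API!"
labels = ["positive", "negative"]

# One (premise, hypothesis) pair per candidate label
inputs = tokenizer(
    [text] * len(labels),
    [f"This example is {label}" for label in labels],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    logits = model(**inputs).logits

entailment_id = 2  # index of the "entailment" class for this checkpoint
print(labels[logits[:, entailment_id].argmax().item()])  # -> "positive"
```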
+ ) + name = "text_qa" + pre_processor_class = AutoTokenizer + model_class = AutoModelForSeq2SeqLM + + inputs = ["text", "text"] + outputs = ["text"] + + def encode(self, text: str, question: str): + prompt = QA_PROMPT.format(text=text, question=question) + return self.pre_processor(prompt, return_tensors="pt") + + def forward(self, inputs): + output_ids = self.model.generate(**inputs) + + in_b, _ = inputs["input_ids"].shape + out_b = output_ids.shape[0] + + return output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])[0][0] + + def decode(self, outputs): + return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True) diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_summarization.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_summarization.py new file mode 100644 index 0000000000000000000000000000000000000000..8eedf234ae50b51e23e829cae2b8de4f3ad287e5 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_summarization.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer +from .base import PipelineTool + + +class TextSummarizationTool(PipelineTool): + """ + Example: + + ```py + from transformers.tools import TextSummarizationTool + + summarizer = TextSummarizationTool() + summarizer(long_text) + ``` + """ + + default_checkpoint = "philschmid/bart-large-cnn-samsum" + description = ( + "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, " + "and returns a summary of the text." + ) + name = "summarizer" + pre_processor_class = AutoTokenizer + model_class = AutoModelForSeq2SeqLM + + inputs = ["text"] + outputs = ["text"] + + def encode(self, text): + return self.pre_processor(text, return_tensors="pt", truncation=True) + + def forward(self, inputs): + return self.model.generate(**inputs)[0] + + def decode(self, outputs): + return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True) diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_to_speech.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_to_speech.py new file mode 100644 index 0000000000000000000000000000000000000000..9faed77b01a35c3bd9c9530cd421f02e348a13af --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/text_to_speech.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
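A hedged usage sketch for the TextQuestionAnsweringTool above; the import assumes the same export convention as the sibling tools, and the sample text and expected answer are illustrative:

```py
from transformers.tools import TextQuestionAnsweringTool

text_qa = TextQuestionAnsweringTool()
text = "Hugging Face was founded in 2016 and is headquartered in New York City."
# The text/question pair is folded into QA_PROMPT and run through Flan-T5
print(text_qa(text=text, question="When was Hugging Face founded?"))  # e.g. "2016"
```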
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch + +from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor +from ..utils import is_datasets_available +from .base import PipelineTool + + +if is_datasets_available(): + from datasets import load_dataset + + +class TextToSpeechTool(PipelineTool): + default_checkpoint = "microsoft/speecht5_tts" + description = ( + "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the " + "text to read (in English) and returns a waveform object containing the sound." + ) + name = "text_reader" + pre_processor_class = SpeechT5Processor + model_class = SpeechT5ForTextToSpeech + post_processor_class = SpeechT5HifiGan + + inputs = ["text"] + outputs = ["audio"] + + def setup(self): + if self.post_processor is None: + self.post_processor = "microsoft/speecht5_hifigan" + super().setup() + + def encode(self, text, speaker_embeddings=None): + inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True) + + if speaker_embeddings is None: + if not is_datasets_available(): + raise ImportError("Datasets needs to be installed if not passing speaker embeddings.") + + embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation") + speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0) + + return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} + + def forward(self, inputs): + with torch.no_grad(): + return self.model.generate_speech(**inputs) + + def decode(self, outputs): + with torch.no_grad(): + return self.post_processor(outputs).cpu().detach() diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/translation.py b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/translation.py new file mode 100644 index 0000000000000000000000000000000000000000..50a164d5bd6f4f7b647374484bd20c95e74c5dc9 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/transformers/tools/translation.py @@ -0,0 +1,271 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
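A hedged usage sketch for the TextToSpeechTool above. Persisting the waveform with scipy is an assumption made for illustration, not something the tool does itself; SpeechT5's vocoder produces 16 kHz audio:

```py
from scipy.io import wavfile

from transformers.tools import TextToSpeechTool

reader = TextToSpeechTool()
speech = reader("Reading text out loud is now one tool call away.")
# `speech` is a detached 1-D float tensor on CPU
wavfile.write("speech.wav", rate=16000, data=speech.numpy())
```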
+from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer +from .base import PipelineTool + + +LANGUAGE_CODES = { + "Acehnese Arabic": "ace_Arab", + "Acehnese Latin": "ace_Latn", + "Mesopotamian Arabic": "acm_Arab", + "Ta'izzi-Adeni Arabic": "acq_Arab", + "Tunisian Arabic": "aeb_Arab", + "Afrikaans": "afr_Latn", + "South Levantine Arabic": "ajp_Arab", + "Akan": "aka_Latn", + "Amharic": "amh_Ethi", + "North Levantine Arabic": "apc_Arab", + "Modern Standard Arabic": "arb_Arab", + "Modern Standard Arabic Romanized": "arb_Latn", + "Najdi Arabic": "ars_Arab", + "Moroccan Arabic": "ary_Arab", + "Egyptian Arabic": "arz_Arab", + "Assamese": "asm_Beng", + "Asturian": "ast_Latn", + "Awadhi": "awa_Deva", + "Central Aymara": "ayr_Latn", + "South Azerbaijani": "azb_Arab", + "North Azerbaijani": "azj_Latn", + "Bashkir": "bak_Cyrl", + "Bambara": "bam_Latn", + "Balinese": "ban_Latn", + "Belarusian": "bel_Cyrl", + "Bemba": "bem_Latn", + "Bengali": "ben_Beng", + "Bhojpuri": "bho_Deva", + "Banjar Arabic": "bjn_Arab", + "Banjar Latin": "bjn_Latn", + "Standard Tibetan": "bod_Tibt", + "Bosnian": "bos_Latn", + "Buginese": "bug_Latn", + "Bulgarian": "bul_Cyrl", + "Catalan": "cat_Latn", + "Cebuano": "ceb_Latn", + "Czech": "ces_Latn", + "Chokwe": "cjk_Latn", + "Central Kurdish": "ckb_Arab", + "Crimean Tatar": "crh_Latn", + "Welsh": "cym_Latn", + "Danish": "dan_Latn", + "German": "deu_Latn", + "Southwestern Dinka": "dik_Latn", + "Dyula": "dyu_Latn", + "Dzongkha": "dzo_Tibt", + "Greek": "ell_Grek", + "English": "eng_Latn", + "Esperanto": "epo_Latn", + "Estonian": "est_Latn", + "Basque": "eus_Latn", + "Ewe": "ewe_Latn", + "Faroese": "fao_Latn", + "Fijian": "fij_Latn", + "Finnish": "fin_Latn", + "Fon": "fon_Latn", + "French": "fra_Latn", + "Friulian": "fur_Latn", + "Nigerian Fulfulde": "fuv_Latn", + "Scottish Gaelic": "gla_Latn", + "Irish": "gle_Latn", + "Galician": "glg_Latn", + "Guarani": "grn_Latn", + "Gujarati": "guj_Gujr", + "Haitian Creole": "hat_Latn", + "Hausa": "hau_Latn", + "Hebrew": "heb_Hebr", + "Hindi": "hin_Deva", + "Chhattisgarhi": "hne_Deva", + "Croatian": "hrv_Latn", + "Hungarian": "hun_Latn", + "Armenian": "hye_Armn", + "Igbo": "ibo_Latn", + "Ilocano": "ilo_Latn", + "Indonesian": "ind_Latn", + "Icelandic": "isl_Latn", + "Italian": "ita_Latn", + "Javanese": "jav_Latn", + "Japanese": "jpn_Jpan", + "Kabyle": "kab_Latn", + "Jingpho": "kac_Latn", + "Kamba": "kam_Latn", + "Kannada": "kan_Knda", + "Kashmiri Arabic": "kas_Arab", + "Kashmiri Devanagari": "kas_Deva", + "Georgian": "kat_Geor", + "Central Kanuri Arabic": "knc_Arab", + "Central Kanuri Latin": "knc_Latn", + "Kazakh": "kaz_Cyrl", + "Kabiyè": "kbp_Latn", + "Kabuverdianu": "kea_Latn", + "Khmer": "khm_Khmr", + "Kikuyu": "kik_Latn", + "Kinyarwanda": "kin_Latn", + "Kyrgyz": "kir_Cyrl", + "Kimbundu": "kmb_Latn", + "Northern Kurdish": "kmr_Latn", + "Kikongo": "kon_Latn", + "Korean": "kor_Hang", + "Lao": "lao_Laoo", + "Ligurian": "lij_Latn", + "Limburgish": "lim_Latn", + "Lingala": "lin_Latn", + "Lithuanian": "lit_Latn", + "Lombard": "lmo_Latn", + "Latgalian": "ltg_Latn", + "Luxembourgish": "ltz_Latn", + "Luba-Kasai": "lua_Latn", + "Ganda": "lug_Latn", + "Luo": "luo_Latn", + "Mizo": "lus_Latn", + "Standard Latvian": "lvs_Latn", + "Magahi": "mag_Deva", + "Maithili": "mai_Deva", + "Malayalam": "mal_Mlym", + "Marathi": "mar_Deva", + "Minangkabau Arabic ": "min_Arab", + "Minangkabau Latin": "min_Latn", + "Macedonian": "mkd_Cyrl", + "Plateau Malagasy": "plt_Latn", + "Maltese": "mlt_Latn", + "Meitei Bengali": "mni_Beng", + "Halh Mongolian": "khk_Cyrl", + 
"Mossi": "mos_Latn", + "Maori": "mri_Latn", + "Burmese": "mya_Mymr", + "Dutch": "nld_Latn", + "Norwegian Nynorsk": "nno_Latn", + "Norwegian Bokmål": "nob_Latn", + "Nepali": "npi_Deva", + "Northern Sotho": "nso_Latn", + "Nuer": "nus_Latn", + "Nyanja": "nya_Latn", + "Occitan": "oci_Latn", + "West Central Oromo": "gaz_Latn", + "Odia": "ory_Orya", + "Pangasinan": "pag_Latn", + "Eastern Panjabi": "pan_Guru", + "Papiamento": "pap_Latn", + "Western Persian": "pes_Arab", + "Polish": "pol_Latn", + "Portuguese": "por_Latn", + "Dari": "prs_Arab", + "Southern Pashto": "pbt_Arab", + "Ayacucho Quechua": "quy_Latn", + "Romanian": "ron_Latn", + "Rundi": "run_Latn", + "Russian": "rus_Cyrl", + "Sango": "sag_Latn", + "Sanskrit": "san_Deva", + "Santali": "sat_Olck", + "Sicilian": "scn_Latn", + "Shan": "shn_Mymr", + "Sinhala": "sin_Sinh", + "Slovak": "slk_Latn", + "Slovenian": "slv_Latn", + "Samoan": "smo_Latn", + "Shona": "sna_Latn", + "Sindhi": "snd_Arab", + "Somali": "som_Latn", + "Southern Sotho": "sot_Latn", + "Spanish": "spa_Latn", + "Tosk Albanian": "als_Latn", + "Sardinian": "srd_Latn", + "Serbian": "srp_Cyrl", + "Swati": "ssw_Latn", + "Sundanese": "sun_Latn", + "Swedish": "swe_Latn", + "Swahili": "swh_Latn", + "Silesian": "szl_Latn", + "Tamil": "tam_Taml", + "Tatar": "tat_Cyrl", + "Telugu": "tel_Telu", + "Tajik": "tgk_Cyrl", + "Tagalog": "tgl_Latn", + "Thai": "tha_Thai", + "Tigrinya": "tir_Ethi", + "Tamasheq Latin": "taq_Latn", + "Tamasheq Tifinagh": "taq_Tfng", + "Tok Pisin": "tpi_Latn", + "Tswana": "tsn_Latn", + "Tsonga": "tso_Latn", + "Turkmen": "tuk_Latn", + "Tumbuka": "tum_Latn", + "Turkish": "tur_Latn", + "Twi": "twi_Latn", + "Central Atlas Tamazight": "tzm_Tfng", + "Uyghur": "uig_Arab", + "Ukrainian": "ukr_Cyrl", + "Umbundu": "umb_Latn", + "Urdu": "urd_Arab", + "Northern Uzbek": "uzn_Latn", + "Venetian": "vec_Latn", + "Vietnamese": "vie_Latn", + "Waray": "war_Latn", + "Wolof": "wol_Latn", + "Xhosa": "xho_Latn", + "Eastern Yiddish": "ydd_Hebr", + "Yoruba": "yor_Latn", + "Yue Chinese": "yue_Hant", + "Chinese Simplified": "zho_Hans", + "Chinese Traditional": "zho_Hant", + "Standard Malay": "zsm_Latn", + "Zulu": "zul_Latn", +} + + +class TranslationTool(PipelineTool): + """ + Example: + + ```py + from transformers.tools import TranslationTool + + translator = TranslationTool() + translator("This is a super nice API!", src_lang="English", tgt_lang="French") + ``` + """ + + default_checkpoint = "facebook/nllb-200-distilled-600M" + description = ( + "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should " + "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, " + "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in " + "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`." 
+ ) + name = "translator" + pre_processor_class = AutoTokenizer + model_class = AutoModelForSeq2SeqLM + lang_to_code = LANGUAGE_CODES + + inputs = ["text", "text", "text"] + outputs = ["text"] + + def encode(self, text, src_lang, tgt_lang): + if src_lang not in self.lang_to_code: + raise ValueError(f"{src_lang} is not a supported language.") + if tgt_lang not in self.lang_to_code: + raise ValueError(f"{tgt_lang} is not a supported language.") + src_lang = self.lang_to_code[src_lang] + tgt_lang = self.lang_to_code[tgt_lang] + return self.pre_processor._build_translation_inputs( + text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang + ) + + def forward(self, inputs): + return self.model.generate(**inputs) + + def decode(self, outputs): + return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True) diff --git a/evalkit_internvl/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_vision_objects.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_vision_objects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d30948947c91031555616a7a0fd68f2d89d955e Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_vision_objects.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..23cd0e44f89217b8391d0ce236070271db9aaf83 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["modeling_audioldm2"] = ["AudioLDM2ProjectionModel", "AudioLDM2UNet2DConditionModel"] + _import_structure["pipeline_audioldm2"] = ["AudioLDM2Pipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .modeling_audioldm2 import AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel + from .pipeline_audioldm2 import AudioLDM2Pipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2bec407b6881bbc94a6ace6a6ed9fdf0f7146ee7 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/wuerstchen/__pycache__/modeling_wuerstchen_prior.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/wuerstchen/__pycache__/modeling_wuerstchen_prior.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9316a6ad6955440a2863d907e8c6d377407e9f3 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/wuerstchen/__pycache__/modeling_wuerstchen_prior.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/wuerstchen/__pycache__/pipeline_wuerstchen_prior.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/wuerstchen/__pycache__/pipeline_wuerstchen_prior.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9be3b0b9551464d71aee237d50ec007a9b871c4c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/wuerstchen/__pycache__/pipeline_wuerstchen_prior.cpython-310.pyc differ
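The `_LazyModule` indirection in the audioldm2 `__init__.py` above defers the heavy imports until an attribute is first accessed. A sketch of what that enables, assuming `torch` and a recent enough `transformers` (>=4.27.0) are installed; the checkpoint name is illustrative:

```py
# Nothing heavy is imported until this line resolves the attribute lazily
from diffusers.pipelines.audioldm2 import AudioLDM2Pipeline

pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2")  # illustrative checkpoint
audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
```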