hc99 commited on
Commit
1856027
·
verified ·
1 Parent(s): c61e0be

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. testbed/googleapis__python-aiplatform/.coveragerc +16 -0
  3. testbed/googleapis__python-aiplatform/.flake8 +33 -0
  4. testbed/googleapis__python-aiplatform/.gitignore +64 -0
  5. testbed/googleapis__python-aiplatform/.pre-commit-config.yaml +31 -0
  6. testbed/googleapis__python-aiplatform/.release-please-manifest.json +3 -0
  7. testbed/googleapis__python-aiplatform/.repo-metadata.json +14 -0
  8. testbed/googleapis__python-aiplatform/.trampolinerc +61 -0
  9. testbed/googleapis__python-aiplatform/CHANGELOG.md +0 -0
  10. testbed/googleapis__python-aiplatform/CODE_OF_CONDUCT.md +95 -0
  11. testbed/googleapis__python-aiplatform/CONTRIBUTING.rst +281 -0
  12. testbed/googleapis__python-aiplatform/LICENSE +202 -0
  13. testbed/googleapis__python-aiplatform/MANIFEST.in +25 -0
  14. testbed/googleapis__python-aiplatform/README.rst +573 -0
  15. testbed/googleapis__python-aiplatform/SECURITY.md +7 -0
  16. testbed/googleapis__python-aiplatform/gemini_docs/README.md +326 -0
  17. testbed/googleapis__python-aiplatform/gemini_docs/conf.py +440 -0
  18. testbed/googleapis__python-aiplatform/gemini_docs/index.rst +8 -0
  19. testbed/googleapis__python-aiplatform/gemini_docs/vertexai/vertexai.rst +97 -0
  20. testbed/googleapis__python-aiplatform/mypy.ini +3 -0
  21. testbed/googleapis__python-aiplatform/noxfile.py +591 -0
  22. testbed/googleapis__python-aiplatform/owlbot.py +205 -0
  23. testbed/googleapis__python-aiplatform/pypi/README.md +4 -0
  24. testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/LICENSE +202 -0
  25. testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/README.md +6 -0
  26. testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/pyproject.toml +17 -0
  27. testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/setup.py +247 -0
  28. testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/version.py +18 -0
  29. testbed/googleapis__python-aiplatform/release-please-config.json +48 -0
  30. testbed/googleapis__python-aiplatform/renovate.json +12 -0
  31. testbed/googleapis__python-aiplatform/sdk_schema_tests/__init__.py +14 -0
  32. testbed/googleapis__python-aiplatform/sdk_schema_tests/common_contract.py +24 -0
  33. testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/__init__.py +14 -0
  34. testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/method_signature_tests.py +54 -0
  35. testbed/googleapis__python-aiplatform/setup.cfg +19 -0
  36. testbed/googleapis__python-aiplatform/setup.py +300 -0
  37. testbed/googleapis__python-aiplatform/testing/constraints-langchain.txt +3 -0
  38. testbed/googleapis__python-aiplatform/testing/constraints-ray-2.33.0.txt +13 -0
  39. testbed/googleapis__python-aiplatform/testing/constraints-ray-2.4.0.txt +13 -0
  40. testbed/googleapis__python-aiplatform/tests/system/__init__.py +15 -0
  41. testbed/googleapis__python-aiplatform/tests/system/aiplatform/e2e_base.py +216 -0
  42. testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_custom_job.py +185 -0
  43. testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_dataset.py +436 -0
  44. testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_forecasting.py +395 -0
  45. testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_metadata_schema.py +121 -0
  46. testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_tabular.py +221 -0
  47. testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_experiments.py +769 -0
  48. testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_featurestore.py +714 -0
  49. testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_initializer.py +59 -0
  50. testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_language_models.py +559 -0
.gitattributes CHANGED
@@ -74,3 +74,5 @@ testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/3.0 filter=lfs
74
  testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/0.0 filter=lfs diff=lfs merge=lfs -text
75
  testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/4.0 filter=lfs diff=lfs merge=lfs -text
76
  testbed/scverse__scanpy/scanpy/datasets/10x_pbmc68k_reduced.h5ad filter=lfs diff=lfs merge=lfs -text
 
 
 
74
  testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/0.0 filter=lfs diff=lfs merge=lfs -text
75
  testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/4.0 filter=lfs diff=lfs merge=lfs -text
76
  testbed/scverse__scanpy/scanpy/datasets/10x_pbmc68k_reduced.h5ad filter=lfs diff=lfs merge=lfs -text
77
+ testbed/scikit-learn__scikit-learn/doc/logos/identity.pdf filter=lfs diff=lfs merge=lfs -text
78
+ testbed/pyvista__pyvista/tests/plotting/fonts/Mplus2-Regular.ttf filter=lfs diff=lfs merge=lfs -text
testbed/googleapis__python-aiplatform/.coveragerc ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [run]
2
+ branch = True
3
+
4
+ [report]
5
+ show_missing = True
6
+ omit =
7
+ google/cloud/aiplatform/vizier/pyvizier/*
8
+ google/cloud/aiplatform_v1/*
9
+ google/cloud/aiplatform_v1beta1/*
10
+ google/cloud/aiplatform/v1/schema/*
11
+ google/cloud/aiplatform/v1beta1/schema/*
12
+ exclude_lines =
13
+ # Re-enable the standard pragma
14
+ pragma: NO COVER
15
+ # Ignore debug-only repr
16
+ def __repr__
testbed/googleapis__python-aiplatform/.flake8 ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright 2023 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # https://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Generated by synthtool. DO NOT EDIT!
18
+ [flake8]
19
+ ignore = E203, E231, E266, E501, W503
20
+ exclude =
21
+ # Exclude generated code.
22
+ **/proto/**
23
+ **/gapic/**
24
+ **/services/**
25
+ **/types/**
26
+ *_pb2.py
27
+
28
+ # Standard linting exemptions.
29
+ **/.nox/**
30
+ __pycache__,
31
+ .git,
32
+ *.pyc,
33
+ conf.py
testbed/googleapis__python-aiplatform/.gitignore ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.py[cod]
2
+ *.sw[op]
3
+
4
+ # C extensions
5
+ *.so
6
+
7
+ # Packages
8
+ *.egg
9
+ *.egg-info
10
+ dist
11
+ build
12
+ eggs
13
+ .eggs
14
+ parts
15
+ bin
16
+ var
17
+ sdist
18
+ develop-eggs
19
+ .installed.cfg
20
+ lib
21
+ lib64
22
+ __pycache__
23
+
24
+ # Installer logs
25
+ pip-log.txt
26
+
27
+ # Unit test / coverage reports
28
+ .coverage
29
+ .nox
30
+ .cache
31
+ .pytest_cache
32
+
33
+
34
+ # Mac
35
+ .DS_Store
36
+
37
+ # JetBrains
38
+ .idea
39
+
40
+ # VS Code
41
+ .vscode
42
+
43
+ # emacs
44
+ *~
45
+
46
+ # Built documentation
47
+ docs/_build
48
+ bigquery/docs/generated
49
+ docs.metadata
50
+
51
+ # Virtual environment
52
+ env/
53
+ venv/
54
+
55
+ # Test logs
56
+ coverage.xml
57
+ *sponge_log.xml
58
+
59
+ # System test environment variables.
60
+ system_tests/local_test_setup
61
+
62
+ # Make sure a generated file isn't accidentally committed.
63
+ pylintrc
64
+ pylintrc.test
testbed/googleapis__python-aiplatform/.pre-commit-config.yaml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #
15
+ # See https://pre-commit.com for more information
16
+ # See https://pre-commit.com/hooks.html for more hooks
17
+ repos:
18
+ - repo: https://github.com/pre-commit/pre-commit-hooks
19
+ rev: v4.0.1
20
+ hooks:
21
+ - id: trailing-whitespace
22
+ - id: end-of-file-fixer
23
+ - id: check-yaml
24
+ - repo: https://github.com/psf/black
25
+ rev: 22.3.0
26
+ hooks:
27
+ - id: black
28
+ - repo: https://github.com/pycqa/flake8
29
+ rev: 6.1.0
30
+ hooks:
31
+ - id: flake8
testbed/googleapis__python-aiplatform/.release-please-manifest.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ ".": "1.75.0"
3
+ }
testbed/googleapis__python-aiplatform/.repo-metadata.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "aiplatform",
3
+ "name_pretty": "AI Platform",
4
+ "product_documentation": "https://cloud.google.com/ai-platform",
5
+ "client_documentation": "https://cloud.google.com/python/docs/reference/aiplatform/latest",
6
+ "issue_tracker": "https://issuetracker.google.com/savedsearches/559744",
7
+ "release_level": "stable",
8
+ "language": "python",
9
+ "library_type": "GAPIC_COMBO",
10
+ "repo": "googleapis/python-aiplatform",
11
+ "distribution_name": "google-cloud-aiplatform",
12
+ "api_id": "aiplatform.googleapis.com",
13
+ "api_shortname": "aiplatform"
14
+ }
testbed/googleapis__python-aiplatform/.trampolinerc ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Add required env vars here.
16
+ required_envvars+=(
17
+ )
18
+
19
+ # Add env vars which are passed down into the container here.
20
+ pass_down_envvars+=(
21
+ "NOX_SESSION"
22
+ ###############
23
+ # Docs builds
24
+ ###############
25
+ "STAGING_BUCKET"
26
+ "V2_STAGING_BUCKET"
27
+ ##################
28
+ # Samples builds
29
+ ##################
30
+ "INSTALL_LIBRARY_FROM_SOURCE"
31
+ "RUN_TESTS_SESSION"
32
+ "BUILD_SPECIFIC_GCLOUD_PROJECT"
33
+ # Target directories.
34
+ "RUN_TESTS_DIRS"
35
+ # The nox session to run.
36
+ "RUN_TESTS_SESSION"
37
+ )
38
+
39
+ # Prevent unintentional override on the default image.
40
+ if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \
41
+ [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
42
+ echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image."
43
+ exit 1
44
+ fi
45
+
46
+ # Define the default value if it makes sense.
47
+ if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then
48
+ TRAMPOLINE_IMAGE_UPLOAD=""
49
+ fi
50
+
51
+ if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
52
+ TRAMPOLINE_IMAGE=""
53
+ fi
54
+
55
+ if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then
56
+ TRAMPOLINE_DOCKERFILE=""
57
+ fi
58
+
59
+ if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then
60
+ TRAMPOLINE_BUILD_FILE=""
61
+ fi
testbed/googleapis__python-aiplatform/CHANGELOG.md ADDED
The diff for this file is too large to render. See raw diff
 
testbed/googleapis__python-aiplatform/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!-- # Generated by synthtool. DO NOT EDIT! !-->
2
+ # Code of Conduct
3
+
4
+ ## Our Pledge
5
+
6
+ In the interest of fostering an open and welcoming environment, we as
7
+ contributors and maintainers pledge to making participation in our project and
8
+ our community a harassment-free experience for everyone, regardless of age, body
9
+ size, disability, ethnicity, gender identity and expression, level of
10
+ experience, education, socio-economic status, nationality, personal appearance,
11
+ race, religion, or sexual identity and orientation.
12
+
13
+ ## Our Standards
14
+
15
+ Examples of behavior that contributes to creating a positive environment
16
+ include:
17
+
18
+ * Using welcoming and inclusive language
19
+ * Being respectful of differing viewpoints and experiences
20
+ * Gracefully accepting constructive criticism
21
+ * Focusing on what is best for the community
22
+ * Showing empathy towards other community members
23
+
24
+ Examples of unacceptable behavior by participants include:
25
+
26
+ * The use of sexualized language or imagery and unwelcome sexual attention or
27
+ advances
28
+ * Trolling, insulting/derogatory comments, and personal or political attacks
29
+ * Public or private harassment
30
+ * Publishing others' private information, such as a physical or electronic
31
+ address, without explicit permission
32
+ * Other conduct which could reasonably be considered inappropriate in a
33
+ professional setting
34
+
35
+ ## Our Responsibilities
36
+
37
+ Project maintainers are responsible for clarifying the standards of acceptable
38
+ behavior and are expected to take appropriate and fair corrective action in
39
+ response to any instances of unacceptable behavior.
40
+
41
+ Project maintainers have the right and responsibility to remove, edit, or reject
42
+ comments, commits, code, wiki edits, issues, and other contributions that are
43
+ not aligned to this Code of Conduct, or to ban temporarily or permanently any
44
+ contributor for other behaviors that they deem inappropriate, threatening,
45
+ offensive, or harmful.
46
+
47
+ ## Scope
48
+
49
+ This Code of Conduct applies both within project spaces and in public spaces
50
+ when an individual is representing the project or its community. Examples of
51
+ representing a project or community include using an official project e-mail
52
+ address, posting via an official social media account, or acting as an appointed
53
+ representative at an online or offline event. Representation of a project may be
54
+ further defined and clarified by project maintainers.
55
+
56
+ This Code of Conduct also applies outside the project spaces when the Project
57
+ Steward has a reasonable belief that an individual's behavior may have a
58
+ negative impact on the project or its community.
59
+
60
+ ## Conflict Resolution
61
+
62
+ We do not believe that all conflict is bad; healthy debate and disagreement
63
+ often yield positive results. However, it is never okay to be disrespectful or
64
+ to engage in behavior that violates the project’s code of conduct.
65
+
66
+ If you see someone violating the code of conduct, you are encouraged to address
67
+ the behavior directly with those involved. Many issues can be resolved quickly
68
+ and easily, and this gives people more control over the outcome of their
69
+ dispute. If you are unable to resolve the matter for any reason, or if the
70
+ behavior is threatening or harassing, report it. We are dedicated to providing
71
+ an environment where participants feel welcome and safe.
72
+
73
+
74
+ Reports should be directed to *googleapis-stewards@google.com*, the
75
+ Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to
76
+ receive and address reported violations of the code of conduct. They will then
77
+ work with a committee consisting of representatives from the Open Source
78
+ Programs Office and the Google Open Source Strategy team. If for any reason you
79
+ are uncomfortable reaching out to the Project Steward, please email
80
+ opensource@google.com.
81
+
82
+ We will investigate every complaint, but you may not receive a direct response.
83
+ We will use our discretion in determining when and how to follow up on reported
84
+ incidents, which may range from not taking action to permanent expulsion from
85
+ the project and project-sponsored spaces. We will notify the accused of the
86
+ report and provide them an opportunity to discuss it before any action is taken.
87
+ The identity of the reporter will be omitted from the details of the report
88
+ supplied to the accused. In potentially harmful situations, such as ongoing
89
+ harassment or threats to anyone's safety, we may take action without notice.
90
+
91
+ ## Attribution
92
+
93
+ This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
94
+ available at
95
+ https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
testbed/googleapis__python-aiplatform/CONTRIBUTING.rst ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. Generated by synthtool. DO NOT EDIT!
2
+ ############
3
+ Contributing
4
+ ############
5
+
6
+ #. **Please sign one of the contributor license agreements below.**
7
+ #. Fork the repo, develop and test your code changes, add docs.
8
+ #. Make sure that your commit messages clearly describe the changes.
9
+ #. Send a pull request. (Please Read: `Faster Pull Request Reviews`_)
10
+
11
+ .. _Faster Pull Request Reviews: https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
12
+
13
+ .. contents:: Here are some guidelines for hacking on the Google Cloud Client libraries.
14
+
15
+ ***************
16
+ Adding Features
17
+ ***************
18
+
19
+ In order to add a feature:
20
+
21
+ - The feature must be documented in both the API and narrative
22
+ documentation.
23
+
24
+ - The feature must work fully on the following CPython versions:
25
+ 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows.
26
+
27
+ - The feature must not add unnecessary dependencies (where
28
+ "unnecessary" is of course subjective, but new dependencies should
29
+ be discussed).
30
+
31
+ ****************************
32
+ Using a Development Checkout
33
+ ****************************
34
+
35
+ You'll have to create a development environment using a Git checkout:
36
+
37
+ - While logged into your GitHub account, navigate to the
38
+ ``python-aiplatform`` `repo`_ on GitHub.
39
+
40
+ - Fork and clone the ``python-aiplatform`` repository to your GitHub account by
41
+ clicking the "Fork" button.
42
+
43
+ - Clone your fork of ``python-aiplatform`` from your GitHub account to your local
44
+ computer, substituting your account username and specifying the destination
45
+ as ``hack-on-python-aiplatform``. E.g.::
46
+
47
+ $ cd ${HOME}
48
+ $ git clone git@github.com:USERNAME/python-aiplatform.git hack-on-python-aiplatform
49
+ $ cd hack-on-python-aiplatform
50
+ # Configure remotes such that you can pull changes from the googleapis/python-aiplatform
51
+ # repository into your local repository.
52
+ $ git remote add upstream git@github.com:googleapis/python-aiplatform.git
53
+ # fetch and merge changes from upstream into main
54
+ $ git fetch upstream
55
+ $ git merge upstream/main
56
+
57
+ Now your local repo is set up such that you will push changes to your GitHub
58
+ repo, from which you can submit a pull request.
59
+
60
+ To work on the codebase and run the tests, we recommend using ``nox``,
61
+ but you can also use a ``virtualenv`` of your own creation.
62
+
63
+ .. _repo: https://github.com/googleapis/python-aiplatform
64
+
65
+ Using ``nox``
66
+ =============
67
+
68
+ We use `nox <https://nox.readthedocs.io/en/latest/>`__ to instrument our tests.
69
+
70
+ - To test your changes, run unit tests with ``nox``::
71
+ $ nox -s unit
72
+
73
+ - To run a single unit test::
74
+
75
+ $ nox -s unit-3.12 -- -k <name of test>
76
+
77
+
78
+ .. note::
79
+
80
+ The unit tests and system tests are described in the
81
+ ``noxfile.py`` files in each directory.
82
+
83
+ .. nox: https://pypi.org/project/nox/
84
+
85
+ *****************************************
86
+ I'm getting weird errors... Can you help?
87
+ *****************************************
88
+
89
+ If the error mentions ``Python.h`` not being found,
90
+ install ``python-dev`` and try again.
91
+ On Debian/Ubuntu::
92
+
93
+ $ sudo apt-get install python-dev
94
+
95
+ ************
96
+ Coding Style
97
+ ************
98
+ - We use the automatic code formatter ``black``. You can run it using
99
+ the nox session ``blacken``. This will eliminate many lint errors. Run via::
100
+
101
+ $ nox -s blacken
102
+
103
+ - PEP8 compliance is required, with exceptions defined in the linter configuration.
104
+ If you have ``nox`` installed, you can test that you have not introduced
105
+ any non-compliant code via::
106
+
107
+ $ nox -s lint
108
+
109
+ - In order to make ``nox -s lint`` run faster, you can set some environment
110
+ variables::
111
+
112
+ export GOOGLE_CLOUD_TESTING_REMOTE="upstream"
113
+ export GOOGLE_CLOUD_TESTING_BRANCH="main"
114
+
115
+ By doing this, you are specifying the location of the most up-to-date
116
+ version of ``python-aiplatform``. The
117
+ remote name ``upstream`` should point to the official ``googleapis``
118
+ checkout and the branch should be the default branch on that remote (``main``).
119
+
120
+ - This repository contains configuration for the
121
+ `pre-commit <https://pre-commit.com/>`__ tool, which automates checking
122
+ our linters during a commit. If you have it installed on your ``$PATH``,
123
+ you can enable enforcing those checks via:
124
+
125
+ .. code-block:: bash
126
+
127
+ $ pre-commit install
128
+ pre-commit installed at .git/hooks/pre-commit
129
+
130
+ Exceptions to PEP8:
131
+
132
+ - Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for
133
+ "Function-Under-Test"), which is PEP8-incompliant, but more readable.
134
+ Some also use a local variable, ``MUT`` (short for "Module-Under-Test").
135
+
136
+ ********************
137
+ Running System Tests
138
+ ********************
139
+
140
+ - To run system tests, you can execute::
141
+
142
+ # Run all system tests
143
+ $ nox -s system
144
+
145
+ # Run a single system test
146
+ $ nox -s system-3.8 -- -k <name of test>
147
+
148
+
149
+ .. note::
150
+
151
+ System tests are only configured to run under Python 3.8.
152
+ For expediency, we do not run them in older versions of Python 3.
153
+
154
+ This alone will not run the tests. You'll need to change some local
155
+ auth settings and change some configuration in your project to
156
+ run all the tests.
157
+
158
+ - System tests will be run against an actual project. You should use local credentials from gcloud when possible. See `Best practices for application authentication <https://cloud.google.com/docs/authentication/best-practices-applications#local_development_and_testing_with_the>`__. Some tests require a service account. For those tests see `Authenticating as a service account <https://cloud.google.com/docs/authentication/production>`__.
159
+
160
+ *************
161
+ Test Coverage
162
+ *************
163
+
164
+ - The codebase *must* have 100% test statement coverage after each commit.
165
+ You can test coverage via ``nox -s cover``.
166
+
167
+ ******************************************************
168
+ Documentation Coverage and Building HTML Documentation
169
+ ******************************************************
170
+
171
+ If you fix a bug, and the bug requires an API or behavior modification, all
172
+ documentation in this package which references that API or behavior must be
173
+ changed to reflect the bug fix, ideally in the same commit that fixes the bug
174
+ or adds the feature.
175
+
176
+ Build the docs via:
177
+
178
+ $ nox -s docs
179
+
180
+ *************************
181
+ Samples and code snippets
182
+ *************************
183
+
184
+ Code samples and snippets live in the `samples/` catalogue. Feel free to
185
+ provide more examples, but make sure to write tests for those examples.
186
+ Each folder containing example code requires its own `noxfile.py` script
187
+ which automates testing. If you decide to create a new folder, you can
188
+ base it on the `samples/snippets` folder (providing `noxfile.py` and
189
+ the requirements files).
190
+
191
+ The tests will run against a real Google Cloud Project, so you should
192
+ configure them just like the System Tests.
193
+
194
+ - To run sample tests, you can execute::
195
+
196
+ # Run all tests in a folder
197
+ $ cd samples/snippets
198
+ $ nox -s py-3.8
199
+
200
+ # Run a single sample test
201
+ $ cd samples/snippets
202
+ $ nox -s py-3.8 -- -k <name of test>
203
+
204
+ ********************************************
205
+ Note About ``README`` as it pertains to PyPI
206
+ ********************************************
207
+
208
+ The `description on PyPI`_ for the project comes directly from the
209
+ ``README``. Due to the reStructuredText (``rst``) parser used by
210
+ PyPI, relative links which will work on GitHub (e.g. ``CONTRIBUTING.rst``
211
+ instead of
212
+ ``https://github.com/googleapis/python-aiplatform/blob/main/CONTRIBUTING.rst``)
213
+ may cause problems creating links or rendering the description.
214
+
215
+ .. _description on PyPI: https://pypi.org/project/google-cloud-aiplatform
216
+
217
+
218
+ *************************
219
+ Supported Python Versions
220
+ *************************
221
+
222
+ We support:
223
+
224
+ - `Python 3.8`_
225
+ - `Python 3.9`_
226
+ - `Python 3.10`_
227
+ - `Python 3.11`_
228
+ - `Python 3.12`_
229
+
230
+ .. _Python 3.8: https://docs.python.org/3.8/
231
+ .. _Python 3.9: https://docs.python.org/3.9/
232
+ .. _Python 3.10: https://docs.python.org/3.10/
233
+ .. _Python 3.11: https://docs.python.org/3.11/
234
+ .. _Python 3.12: https://docs.python.org/3.12/
235
+
236
+
237
+ Supported versions can be found in our ``noxfile.py`` `config`_.
238
+
239
+ .. _config: https://github.com/googleapis/python-aiplatform/blob/main/noxfile.py
240
+
241
+
242
+ We also explicitly decided to support Python 3 beginning with version 3.8.
243
+ Reasons for this include:
244
+
245
+ - Encouraging use of newest versions of Python 3
246
+ - Taking the lead of `prominent`_ open-source `projects`_
247
+ - `Unicode literal support`_ which allows for a cleaner codebase that
248
+ works in both Python 2 and Python 3
249
+
250
+ .. _prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django
251
+ .. _projects: http://flask.pocoo.org/docs/0.10/python3/
252
+ .. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/
253
+
254
+ **********
255
+ Versioning
256
+ **********
257
+
258
+ This library follows `Semantic Versioning`_.
259
+
260
+ .. _Semantic Versioning: http://semver.org/
261
+
262
+ Some packages are currently in major version zero (``0.y.z``), which means that
263
+ anything may change at any time and the public API should not be considered
264
+ stable.
265
+
266
+ ******************************
267
+ Contributor License Agreements
268
+ ******************************
269
+
270
+ Before we can accept your pull requests you'll need to sign a Contributor
271
+ License Agreement (CLA):
272
+
273
+ - **If you are an individual writing original source code** and **you own the
274
+ intellectual property**, then you'll need to sign an
275
+ `individual CLA <https://developers.google.com/open-source/cla/individual>`__.
276
+ - **If you work for a company that wants to allow you to contribute your work**,
277
+ then you'll need to sign a
278
+ `corporate CLA <https://developers.google.com/open-source/cla/corporate>`__.
279
+
280
+ You can sign these electronically (just scroll to the bottom). After that,
281
+ we'll be able to accept your pull requests.
testbed/googleapis__python-aiplatform/LICENSE ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
testbed/googleapis__python-aiplatform/MANIFEST.in ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright 2023 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # https://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Generated by synthtool. DO NOT EDIT!
18
+ include README.rst LICENSE
19
+ recursive-include google *.json *.proto py.typed
20
+ recursive-include tests *
21
+ global-exclude *.py[co]
22
+ global-exclude __pycache__
23
+
24
+ # Exclude scripts for samples readmegen
25
+ prune scripts/readme-gen
testbed/googleapis__python-aiplatform/README.rst ADDED
@@ -0,0 +1,573 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Vertex AI SDK for Python
2
+ =================================================
3
+
4
+
5
+ Gemini API and Generative AI on Vertex AI
6
+ -----------------------------------------
7
+
8
+ .. note::
9
+
10
+ For Gemini API and Generative AI on Vertex AI, please reference `Vertex Generative AI SDK for Python`_
11
+ .. _Vertex Generative AI SDK for Python: https://cloud.google.com/vertex-ai/generative-ai/docs/reference/python/latest
12
+
13
+ -----------------------------------------
14
+
15
+ |GA| |pypi| |versions| |unit-tests| |system-tests| |sample-tests|
16
+
17
+ `Vertex AI`_: Google Vertex AI is an integrated suite of machine learning tools and services for building and using ML models with AutoML or custom code. It offers both novices and experts the best workbench for the entire machine learning development lifecycle.
18
+
19
+ - `Client Library Documentation`_
20
+ - `Product Documentation`_
21
+
22
+ .. |GA| image:: https://img.shields.io/badge/support-ga-gold.svg
23
+ :target: https://github.com/googleapis/google-cloud-python/blob/main/README.rst#general-availability
24
+ .. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-aiplatform.svg
25
+ :target: https://pypi.org/project/google-cloud-aiplatform/
26
+ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-aiplatform.svg
27
+ :target: https://pypi.org/project/google-cloud-aiplatform/
28
+ .. |unit-tests| image:: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-unit-tests.svg
29
+ :target: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-unit-tests.html
30
+ .. |system-tests| image:: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-system-tests.svg
31
+ :target: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-system-tests.html
32
+ .. |sample-tests| image:: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-sample-tests.svg
33
+ :target: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-sample-tests.html
34
+ .. _Vertex AI: https://cloud.google.com/vertex-ai/docs
35
+ .. _Client Library Documentation: https://cloud.google.com/python/docs/reference/aiplatform/latest
36
+ .. _Product Documentation: https://cloud.google.com/vertex-ai/docs
37
+
38
+ Quick Start
39
+ -----------
40
+
41
+ In order to use this library, you first need to go through the following steps:
42
+
43
+ 1. `Select or create a Cloud Platform project.`_
44
+ 2. `Enable billing for your project.`_
45
+ 3. `Enable the Vertex AI API.`_
46
+ 4. `Setup Authentication.`_
47
+
48
+ .. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
49
+ .. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
50
+ .. _Enable the Vertex AI API.: https://cloud.google.com/vertex-ai/docs/start/use-vertex-ai-python-sdk
51
+ .. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
52
+
53
+ Installation
54
+ ~~~~~~~~~~~~
55
+
56
+ Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
57
+ create isolated Python environments. The basic problem it addresses is one of
58
+ dependencies and versions, and indirectly permissions.
59
+
60
+ With `virtualenv`_, it's possible to install this library without needing system
61
+ install permissions, and without clashing with the installed system
62
+ dependencies.
63
+
64
+ .. _virtualenv: https://virtualenv.pypa.io/en/latest/
65
+
66
+
67
+ Mac/Linux
68
+ ^^^^^^^^^
69
+
70
+ .. code-block:: console
71
+
72
+ pip install virtualenv
73
+ virtualenv <your-env>
74
+ source <your-env>/bin/activate
75
+ <your-env>/bin/pip install google-cloud-aiplatform
76
+
77
+
78
+ Windows
79
+ ^^^^^^^
80
+
81
+ .. code-block:: console
82
+
83
+ pip install virtualenv
84
+ virtualenv <your-env>
85
+ <your-env>\Scripts\activate
86
+ <your-env>\Scripts\pip.exe install google-cloud-aiplatform
87
+
88
+
89
+ Supported Python Versions
90
+ ^^^^^^^^^^^^^^^^^^^^^^^^^
91
+ Python >= 3.8
92
+
93
+ Deprecated Python Versions
94
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^
95
+ Python <= 3.7.
96
+
97
+ The last version of this library compatible with Python 3.6 is google-cloud-aiplatform==1.12.1.
98
+
99
+ Overview
100
+ ~~~~~~~~
101
+ This section provides a brief overview of the Vertex AI SDK for Python. You can also reference the notebooks in `vertex-ai-samples`_ for examples.
102
+
103
+ .. _vertex-ai-samples: https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/main/notebooks/community/sdk
104
+
105
+ All publicly available SDK features can be found in the :code:`google/cloud/aiplatform` directory.
106
+ Under the hood, Vertex SDK builds on top of GAPIC, which stands for Google API CodeGen.
107
+ The GAPIC library code sits in :code:`google/cloud/aiplatform_v1` and :code:`google/cloud/aiplatform_v1beta1`,
108
+ and it is auto-generated from Google's service proto files.
109
+
110
+ For most developers' programmatic needs, they can follow these steps to figure out which libraries to import:
111
+
112
+ 1. Look through :code:`google/cloud/aiplatform` first -- Vertex SDK's APIs will almost always be easier to use and more concise compared with GAPIC
113
+ 2. If the feature that you are looking for cannot be found there, look through :code:`aiplatform_v1` to see if it's available in GAPIC
114
+ 3. If it is still in beta phase, it will be available in :code:`aiplatform_v1beta1`
115
+
116
+ If none of the above scenarios could help you find the right tools for your task, please feel free to open a github issue and send us a feature request.
117
+
118
+ Importing
119
+ ^^^^^^^^^
120
+ Vertex AI SDK resource based functionality can be used by importing the following namespace:
121
+
122
+ .. code-block:: Python
123
+
124
+ from google.cloud import aiplatform
125
+
126
+ Initialization
127
+ ^^^^^^^^^^^^^^
128
+ Initialize the SDK to store common configurations that you use with the SDK.
129
+
130
+ .. code-block:: Python
131
+
132
+ aiplatform.init(
133
+ # your Google Cloud Project ID or number
134
+ # environment default used is not set
135
+ project='my-project',
136
+
137
+ # the Vertex AI region you will use
138
+ # defaults to us-central1
139
+ location='us-central1',
140
+
141
+ # Google Cloud Storage bucket in same region as location
142
+ # used to stage artifacts
143
+ staging_bucket='gs://my_staging_bucket',
144
+
145
+ # custom google.auth.credentials.Credentials
146
+ # environment default credentials used if not set
147
+ credentials=my_credentials,
148
+
149
+ # customer managed encryption key resource name
150
+ # will be applied to all Vertex AI resources if set
151
+ encryption_spec_key_name=my_encryption_key_name,
152
+
153
+ # the name of the experiment to use to track
154
+ # logged metrics and parameters
155
+ experiment='my-experiment',
156
+
157
+ # description of the experiment above
158
+ experiment_description='my experiment description'
159
+ )
160
+
161
+ Datasets
162
+ ^^^^^^^^
163
+ Vertex AI provides managed tabular, text, image, and video datasets. In the SDK, datasets can be used downstream to
164
+ train models.
165
+
166
+ To create a tabular dataset:
167
+
168
+ .. code-block:: Python
169
+
170
+ my_dataset = aiplatform.TabularDataset.create(
171
+ display_name="my-dataset", gcs_source=['gs://path/to/my/dataset.csv'])
172
+
173
+ You can also create and import a dataset in separate steps:
174
+
175
+ .. code-block:: Python
176
+
177
+ from google.cloud import aiplatform
178
+
179
+ my_dataset = aiplatform.TextDataset.create(
180
+ display_name="my-dataset")
181
+
182
+ my_dataset.import_data(
183
+ gcs_source=['gs://path/to/my/dataset.csv'],
184
+ import_schema_uri=aiplatform.schema.dataset.ioformat.text.multi_label_classification
185
+ )
186
+
187
+ To get a previously created Dataset:
188
+
189
+ .. code-block:: Python
190
+
191
+ dataset = aiplatform.ImageDataset('projects/my-project/locations/us-central1/datasets/{DATASET_ID}')
192
+
193
+ Vertex AI supports a variety of dataset schemas. References to these schemas are available under the
194
+ :code:`aiplatform.schema.dataset` namespace. For more information on the supported dataset schemas please refer to the
195
+ `Preparing data docs`_.
196
+
197
+ .. _Preparing data docs: https://cloud.google.com/ai-platform-unified/docs/datasets/prepare
198
+
199
+ Training
200
+ ^^^^^^^^
201
+ The Vertex AI SDK for Python allows you to train Custom and AutoML Models.
202
+
203
+ You can train custom models using a custom Python script, custom Python package, or container.
204
+
205
+ **Preparing Your Custom Code**
206
+
207
+ Vertex AI custom training enables you to train on Vertex AI datasets and produce Vertex AI models. To do so your
208
+ script must adhere to the following contract:
209
+
210
+ It must read datasets from the environment variables populated by the training service:
211
+
212
+ .. code-block:: Python
213
+
214
+ os.environ['AIP_DATA_FORMAT'] # provides format of data
215
+ os.environ['AIP_TRAINING_DATA_URI'] # uri to training split
216
+ os.environ['AIP_VALIDATION_DATA_URI'] # uri to validation split
217
+ os.environ['AIP_TEST_DATA_URI'] # uri to test split
218
+
219
+ Please visit `Using a managed dataset in a custom training application`_ for a detailed overview.
220
+
221
+ .. _Using a managed dataset in a custom training application: https://cloud.google.com/vertex-ai/docs/training/using-managed-datasets
222
+
223
+ It must write the model artifact to the environment variable populated by the training service:
224
+
225
+ .. code-block:: Python
226
+
227
+ os.environ['AIP_MODEL_DIR']
228
+
229
+ **Running Training**
230
+
231
+ .. code-block:: Python
232
+
233
+ job = aiplatform.CustomTrainingJob(
234
+ display_name="my-training-job",
235
+ script_path="training_script.py",
236
+ container_uri="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-2:latest",
237
+ requirements=["gcsfs==0.7.1"],
238
+ model_serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-2:latest",
239
+ )
240
+
241
+ model = job.run(my_dataset,
242
+ replica_count=1,
243
+ machine_type="n1-standard-4",
244
+ accelerator_type='NVIDIA_TESLA_K80',
245
+ accelerator_count=1)
246
+
247
+ In the code block above `my_dataset` is a managed dataset created in the `Dataset` section above. The `model` variable is a managed Vertex AI model that can be deployed or exported.
248
+
249
+
250
+ AutoMLs
251
+ -------
252
+ The Vertex AI SDK for Python supports AutoML tabular, image, text, video, and forecasting.
253
+
254
+ To train an AutoML tabular model:
255
+
256
+ .. code-block:: Python
257
+
258
+ dataset = aiplatform.TabularDataset('projects/my-project/locations/us-central1/datasets/{DATASET_ID}')
259
+
260
+ job = aiplatform.AutoMLTabularTrainingJob(
261
+ display_name="train-automl",
262
+ optimization_prediction_type="regression",
263
+ optimization_objective="minimize-rmse",
264
+ )
265
+
266
+ model = job.run(
267
+ dataset=dataset,
268
+ target_column="target_column_name",
269
+ training_fraction_split=0.6,
270
+ validation_fraction_split=0.2,
271
+ test_fraction_split=0.2,
272
+ budget_milli_node_hours=1000,
273
+ model_display_name="my-automl-model",
274
+ disable_early_stopping=False,
275
+ )
276
+
277
+
278
+ Models
279
+ ------
280
+ To get a model:
281
+
282
+
283
+ .. code-block:: Python
284
+
285
+ model = aiplatform.Model('/projects/my-project/locations/us-central1/models/{MODEL_ID}')
286
+
287
+
288
+
289
+ To upload a model:
290
+
291
+ .. code-block:: Python
292
+
293
+ model = aiplatform.Model.upload(
294
+ display_name='my-model',
295
+ artifact_uri="gs://path/to/my/model/dir",
296
+ serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-2:latest",
297
+ )
298
+
299
+
300
+
301
+ To deploy a model:
302
+
303
+
304
+ .. code-block:: Python
305
+
306
+ endpoint = model.deploy(machine_type="n1-standard-4",
307
+ min_replica_count=1,
308
+ max_replica_count=5,
310
+ accelerator_type='NVIDIA_TESLA_K80',
311
+ accelerator_count=1)
312
+
313
+
314
+ Please visit `Importing models to Vertex AI`_ for a detailed overview:
315
+
316
+ .. _Importing models to Vertex AI: https://cloud.google.com/vertex-ai/docs/general/import-model
317
+
318
+ Model Evaluation
319
+ ----------------
320
+
321
+ The Vertex AI SDK for Python currently supports getting model evaluation metrics for all AutoML models.
322
+
323
+ To list all model evaluations for a model:
324
+
325
+ .. code-block:: Python
326
+
327
+ model = aiplatform.Model('projects/my-project/locations/us-central1/models/{MODEL_ID}')
328
+
329
+ evaluations = model.list_model_evaluations()
330
+
331
+
332
+ To get the model evaluation resource for a given model:
333
+
334
+ .. code-block:: Python
335
+
336
+ model = aiplatform.Model('projects/my-project/locations/us-central1/models/{MODEL_ID}')
337
+
338
+ # returns the first evaluation with no arguments, you can also pass the evaluation ID
339
+ evaluation = model.get_model_evaluation()
340
+
341
+ eval_metrics = evaluation.metrics
342
+
343
+
344
+ You can also create a reference to your model evaluation directly by passing in the resource name of the model evaluation:
345
+
346
+ .. code-block:: Python
347
+
348
+ evaluation = aiplatform.ModelEvaluation(
349
+ evaluation_name='projects/my-project/locations/us-central1/models/{MODEL_ID}/evaluations/{EVALUATION_ID}')
350
+
351
+ Alternatively, you can create a reference to your evaluation by passing in the model and evaluation IDs:
352
+
353
+ .. code-block:: Python
354
+
355
+ evaluation = aiplatform.ModelEvaluation(
356
+ evaluation_name={EVALUATION_ID},
357
+ model_id={MODEL_ID})
358
+
359
+
360
+ Batch Prediction
361
+ ----------------
362
+
363
+ To create a batch prediction job:
364
+
365
+ .. code-block:: Python
366
+
367
+ model = aiplatform.Model('/projects/my-project/locations/us-central1/models/{MODEL_ID}')
368
+
369
+ batch_prediction_job = model.batch_predict(
370
+ job_display_name='my-batch-prediction-job',
371
+ instances_format='csv',
372
+ machine_type='n1-standard-4',
373
+ gcs_source=['gs://path/to/my/file.csv'],
374
+ gcs_destination_prefix='gs://path/to/my/batch_prediction/results/',
375
+ service_account='my-sa@my-project.iam.gserviceaccount.com'
376
+ )
377
+
378
+ You can also create a batch prediction job asynchronously by including the `sync=False` argument:
379
+
380
+ .. code-block:: Python
381
+
382
+ batch_prediction_job = model.batch_predict(..., sync=False)
383
+
384
+ # wait for resource to be created
385
+ batch_prediction_job.wait_for_resource_creation()
386
+
387
+ # get the state
388
+ batch_prediction_job.state
389
+
390
+ # block until job is complete
391
+ batch_prediction_job.wait()
392
+
393
+
394
+ Endpoints
395
+ ---------
396
+
397
+ To create an endpoint:
398
+
399
+ .. code-block:: Python
400
+
401
+ endpoint = aiplatform.Endpoint.create(display_name='my-endpoint')
402
+
403
+ To deploy a model to a created endpoint:
404
+
405
+ .. code-block:: Python
406
+
407
+ model = aiplatform.Model('/projects/my-project/locations/us-central1/models/{MODEL_ID}')
408
+
409
+ endpoint.deploy(model,
410
+ min_replica_count=1,
411
+ max_replica_count=5,
412
+ machine_type='n1-standard-4',
413
+ accelerator_type='NVIDIA_TESLA_K80',
414
+ accelerator_count=1)
415
+
416
+ To get predictions from endpoints:
417
+
418
+ .. code-block:: Python
419
+
420
+ endpoint.predict(instances=[[6.7, 3.1, 4.7, 1.5], [4.6, 3.1, 1.5, 0.2]])
421
+
422
+ To undeploy models from an endpoint:
423
+
424
+ .. code-block:: Python
425
+
426
+ endpoint.undeploy_all()
427
+
428
+ To delete an endpoint:
429
+
430
+ .. code-block:: Python
431
+
432
+ endpoint.delete()
433
+
434
+
435
+ Pipelines
436
+ ---------
437
+
438
+ To create a Vertex AI Pipeline run and monitor until completion:
439
+
440
+ .. code-block:: Python
441
+
442
+ # Instantiate PipelineJob object
443
+ pl = PipelineJob(
444
+ display_name="My first pipeline",
445
+
446
+ # Whether or not to enable caching
447
+ # True = always cache pipeline step result
448
+ # False = never cache pipeline step result
449
+ # None = defer to cache option for each pipeline component in the pipeline definition
450
+ enable_caching=False,
451
+
452
+ # Local or GCS path to a compiled pipeline definition
453
+ template_path="pipeline.json",
454
+
455
+ # Dictionary containing input parameters for your pipeline
456
+ parameter_values=parameter_values,
457
+
458
+ # GCS path to act as the pipeline root
459
+ pipeline_root=pipeline_root,
460
+ )
461
+
462
+ # Execute pipeline in Vertex AI and monitor until completion
463
+ pl.run(
464
+ # Email address of service account to use for the pipeline run
465
+ # You must have iam.serviceAccounts.actAs permission on the service account to use it
466
+ service_account=service_account,
467
+
468
+ # Whether this function call should be synchronous (wait for pipeline run to finish before terminating)
469
+ # or asynchronous (return immediately)
470
+ sync=True
471
+ )
472
+
473
+ To create a Vertex AI Pipeline without monitoring until completion, use `submit` instead of `run`:
474
+
475
+ .. code-block:: Python
476
+
477
+ # Instantiate PipelineJob object
478
+ pl = PipelineJob(
479
+ display_name="My first pipeline",
480
+
481
+ # Whether or not to enable caching
482
+ # True = always cache pipeline step result
483
+ # False = never cache pipeline step result
484
+ # None = defer to cache option for each pipeline component in the pipeline definition
485
+ enable_caching=False,
486
+
487
+ # Local or GCS path to a compiled pipeline definition
488
+ template_path="pipeline.json",
489
+
490
+ # Dictionary containing input parameters for your pipeline
491
+ parameter_values=parameter_values,
492
+
493
+ # GCS path to act as the pipeline root
494
+ pipeline_root=pipeline_root,
495
+ )
496
+
497
+ # Submit the Pipeline to Vertex AI
498
+ pl.submit(
499
+ # Email address of service account to use for the pipeline run
500
+ # You must have iam.serviceAccounts.actAs permission on the service account to use it
501
+ service_account=service_account,
502
+ )
503
+
504
+
505
+ Explainable AI: Get Metadata
506
+ ----------------------------
507
+
508
+ To get metadata in dictionary format from TensorFlow 1 models:
509
+
510
+ .. code-block:: Python
511
+
512
+ from google.cloud.aiplatform.explain.metadata.tf.v1 import saved_model_metadata_builder
513
+
514
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
515
+ 'gs://path/to/my/model/dir', tags=[tf.saved_model.tag_constants.SERVING]
516
+ )
517
+ generated_md = builder.get_metadata()
518
+
519
+ To get metadata in dictionary format from TensorFlow 2 models:
520
+
521
+ .. code-block:: Python
522
+
523
+ from google.cloud.aiplatform.explain.metadata.tf.v2 import saved_model_metadata_builder
524
+
525
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder('gs://path/to/my/model/dir')
526
+ generated_md = builder.get_metadata()
527
+
528
+ To use Explanation Metadata in endpoint deployment and model upload:
529
+
530
+ .. code-block:: Python
531
+
532
+ explanation_metadata = builder.get_metadata_protobuf()
533
+
534
+ # To deploy a model to an endpoint with explanation
535
+ model.deploy(..., explanation_metadata=explanation_metadata)
536
+
537
+ # To deploy a model to a created endpoint with explanation
538
+ endpoint.deploy(..., explanation_metadata=explanation_metadata)
539
+
540
+ # To upload a model with explanation
541
+ aiplatform.Model.upload(..., explanation_metadata=explanation_metadata)
542
+
543
+
544
+ Cloud Profiler
545
+ ----------------------------
546
+
547
+ Cloud Profiler allows you to profile your remote Vertex AI Training jobs on demand and visualize the results in Vertex AI Tensorboard.
548
+
549
+ To start using the profiler with TensorFlow, update your training script to include the following:
550
+
551
+ .. code-block:: Python
552
+
553
+ from google.cloud.aiplatform.training_utils import cloud_profiler
554
+ ...
555
+ cloud_profiler.init()
556
+
557
+ Next, run the job with a Vertex AI TensorBoard instance. For full details on how to do this, visit https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview
558
+
559
+ Finally, visit your TensorBoard in your Google Cloud Console, navigate to the "Profile" tab, and click the `Capture Profile` button. This will allow users to capture profiling statistics for the running jobs.
560
+
561
+
562
+ Next Steps
563
+ ~~~~~~~~~~
564
+
565
+ - Read the `Client Library Documentation`_ for Vertex AI
566
+ API to see other available methods on the client.
567
+ - Read the `Vertex AI API Product documentation`_ to learn
568
+ more about the product and see How-to Guides.
569
+ - View this `README`_ to see the full list of Cloud
570
+ APIs that we cover.
571
+
572
+ .. _Vertex AI API Product documentation: https://cloud.google.com/vertex-ai/docs
573
+ .. _README: https://github.com/googleapis/google-cloud-python/blob/main/README.rst
testbed/googleapis__python-aiplatform/SECURITY.md ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Security Policy
2
+
3
+ To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
4
+
5
+ The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
6
+
7
+ We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
testbed/googleapis__python-aiplatform/gemini_docs/README.md ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Vertex Generative AI SDK for Python
2
+ The Vertex Generative AI SDK helps developers use Google's generative AI
3
+ [Gemini models](http://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/overview)
4
+ to build AI-powered features and applications.
5
+ The SDKs support use cases like the following:
6
+
7
+ - Generate text from texts, images and videos (multimodal generation)
8
+ - Build stateful multi-turn conversations (chat)
9
+ - Function calling
10
+
11
+ ## Installation
12
+
13
+ To install the
14
+ [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/)
15
+ Python package, run the following command:
16
+
17
+ ```shell
18
+ pip3 install --upgrade --user "google-cloud-aiplatform>=1.38"
19
+ ```
20
+
21
+ ## Usage
22
+
23
+ For detailed instructions, see [quickstart](http://cloud.google.com/vertex-ai/docs/generative-ai/start/quickstarts/quickstart-multimodal) and [Introduction to multimodal classes in the Vertex AI SDK](http://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/sdk-for-gemini/gemini-sdk-overview-reference).
24
+
25
+ #### Imports:
26
+ ```python
27
+ import vertexai
28
+ ```
29
+
30
+ #### Initialization:
31
+
32
+ ```python
33
+ vertexai.init(project='my-project', location='us-central1')
34
+ ```
35
+
36
+ #### Basic generation:
37
+ ```python
38
+ from vertexai.generative_models import GenerativeModel
39
+ model = GenerativeModel("gemini-pro")
40
+ print(model.generate_content("Why is sky blue?"))
41
+ ```
42
+
43
+ #### Using images and videos
44
+ ```python
45
+ from vertexai.generative_models import GenerativeModel, Image
46
+ vision_model = GenerativeModel("gemini-pro-vision")
47
+
48
+ # Local image
49
+ image = Image.load_from_file("image.jpg")
50
+ print(vision_model.generate_content(["What is shown in this image?", image]))
51
+
52
+ # Image from Cloud Storage
53
+ image_part = generative_models.Part.from_uri("gs://download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg", mime_type="image/jpeg")
54
+ print(vision_model.generate_content([image_part, "Describe this image?"]))
55
+
56
+ # Text and video
57
+ video_part = Part.from_uri("gs://cloud-samples-data/video/animals.mp4", mime_type="video/mp4")
58
+ print(vision_model.generate_content(["What is in the video? ", video_part]))
59
+ ```
60
+
61
+ #### Chat
62
+ ```python
63
+ from vertexai.generative_models import GenerativeModel, Image
64
+ vision_model = GenerativeModel("gemini-ultra-vision")
65
+ vision_chat = vision_model.start_chat()
66
+ image = Image.load_from_file("image.jpg")
67
+ print(vision_chat.send_message(["I like this image.", image]))
68
+ print(vision_chat.send_message("What things do I like?."))
69
+ ```
70
+
71
+ #### System instructions
72
+ ```python
73
+ from vertexai.generative_models import GenerativeModel
74
+ model = GenerativeModel(
75
+ "gemini-1.0-pro",
76
+ system_instruction=[
77
+ "Talk like a pirate.",
78
+ "Don't use rude words.",
79
+ ],
80
+ )
81
+ print(model.generate_content("Why is sky blue?"))
82
+ ```
83
+
84
+ #### Function calling
85
+
86
+ ```python
87
+ # First, create tools that the model can use to answer your questions.
88
+ # Describe a function by specifying its schema (JsonSchema format)
89
+ get_current_weather_func = generative_models.FunctionDeclaration(
90
+ name="get_current_weather",
91
+ description="Get the current weather in a given location",
92
+ parameters={
93
+ "type": "object",
94
+ "properties": {
95
+ "location": {
96
+ "type": "string",
97
+ "description": "The city and state, e.g. San Francisco, CA"
98
+ },
99
+ "unit": {
100
+ "type": "string",
101
+ "enum": [
102
+ "celsius",
103
+ "fahrenheit",
104
+ ]
105
+ }
106
+ },
107
+ "required": [
108
+ "location"
109
+ ]
110
+ },
111
+ )
112
+ # Tool is a collection of related functions
113
+ weather_tool = generative_models.Tool(
114
+ function_declarations=[get_current_weather_func],
115
+ )
116
+
117
+ # Use tools in chat:
118
+ model = GenerativeModel(
119
+ "gemini-pro",
120
+ # You can specify tools when creating a model to avoid having to send them with every request.
121
+ tools=[weather_tool],
122
+ )
123
+ chat = model.start_chat()
124
+ # Send a message to the model. The model will respond with a function call.
125
+ print(chat.send_message("What is the weather like in Boston?"))
126
+ # Then send a function response to the model. The model will use it to answer.
127
+ print(chat.send_message(
128
+ Part.from_function_response(
129
+ name="get_current_weather",
130
+ response={
131
+ "content": {"weather": "super nice"},
132
+ }
133
+ ),
134
+ ))
135
+ ```
136
+
137
+
138
+ #### Automatic Function calling
139
+
140
+ Note: The `FunctionDeclaration.from_func` converter does not support nested types for parameters. Please provide a full `FunctionDeclaration` instead.
141
+
142
+ ```python
143
+ from vertexai.preview.generative_models import GenerativeModel, Tool, FunctionDeclaration, AutomaticFunctionCallingResponder
144
+
145
+ # First, create functions that the model can use to answer your questions.
146
+ def get_current_weather(location: str, unit: str = "centigrade"):
147
+ """Gets weather in the specified location.
148
+
149
+ Args:
150
+ location: The location for which to get the weather.
151
+ unit: Optional. Temperature unit. Can be Centigrade or Fahrenheit. Defaults to Centigrade.
152
+ """
153
+ return dict(
154
+ location=location,
155
+ unit=unit,
156
+ weather="Super nice, but maybe a bit hot.",
157
+ )
158
+
159
+ # Infer function schema
160
+ get_current_weather_func = FunctionDeclaration.from_func(get_current_weather)
161
+ # Tool is a collection of related functions
162
+ weather_tool = Tool(
163
+ function_declarations=[get_current_weather_func],
164
+ )
165
+
166
+ # Use tools in chat:
167
+ model = GenerativeModel(
168
+ "gemini-pro",
169
+ # You can specify tools when creating a model to avoid having to send them with every request.
170
+ tools=[weather_tool],
171
+ )
172
+
173
+ # Activate automatic function calling:
174
+ afc_responder = AutomaticFunctionCallingResponder(
175
+ # Optional:
176
+ max_automatic_function_calls=5,
177
+ )
178
+ chat = model.start_chat(responder=afc_responder)
179
+ # Send a message to the model. The model will respond with a function call.
180
+ # The SDK will automatically call the requested function and respond to the model.
181
+ # The model will use the function call response to answer the original question.
182
+ print(chat.send_message("What is the weather like in Boston?"))
183
+ ```
184
+
185
+ #### Evaluation
186
+
187
+ - To perform bring-your-own-response (BYOR) evaluation, provide the model responses in the `response` column in the dataset. If a pairwise metric is used for BYOR evaluation, provide the baseline model responses in the `baseline_model_response` column.
188
+
189
+ ```python
190
+ import pandas as pd
191
+ from vertexai.evaluation import EvalTask, MetricPromptTemplateExamples
192
+
193
+ eval_dataset = pd.DataFrame({
194
+ "prompt" : [...],
195
+ "reference": [...],
196
+ "response" : [...],
197
+ "baseline_model_response": [...],
198
+ })
199
+ eval_task = EvalTask(
200
+ dataset=eval_dataset,
201
+ metrics=[
202
+ "bleu",
203
+ "rouge_l_sum",
204
+ MetricPromptTemplateExamples.Pointwise.FLUENCY,
205
+ MetricPromptTemplateExamples.Pairwise.SAFETY
206
+ ],
207
+ experiment="my-experiment",
208
+ )
209
+ eval_result = eval_task.evaluate(experiment_run_name="eval-experiment-run")
210
+ ```
211
+ - To perform evaluation with Gemini model inference, specify the `model` parameter with a `GenerativeModel` instance. The input column name to the model is `prompt` and must be present in the dataset.
212
+
213
+ ```python
214
+ from vertexai.evaluation import EvalTask
215
+ from vertexai.generative_models import GenerativeModel
216
+
217
+ eval_dataset = pd.DataFrame({
218
+ "reference": [...],
219
+ "prompt" : [...],
220
+ })
221
+ result = EvalTask(
222
+ dataset=eval_dataset,
223
+ metrics=["exact_match", "bleu", "rouge_1", "rouge_l_sum"],
224
+ experiment="my-experiment",
225
+ ).evaluate(
226
+ model=GenerativeModel("gemini-1.5-pro"),
227
+ experiment_run_name="gemini-eval-run"
228
+ )
229
+ ```
230
+
231
+ - If a `prompt_template` is specified, the `prompt` column is not required. Prompts can be assembled from the evaluation dataset, and all prompt template variable names must be present in the dataset columns.
232
+
233
+ ```python
234
+ import pandas as pd
235
+ from vertexai.evaluation import EvalTask, MetricPromptTemplateExamples
236
+ from vertexai.generative_models import GenerativeModel
237
+
238
+ eval_dataset = pd.DataFrame({
239
+ "context" : [...],
240
+ "instruction": [...],
241
+ })
242
+ result = EvalTask(
243
+ dataset=eval_dataset,
244
+ metrics=[MetricPromptTemplateExamples.Pointwise.SUMMARIZATION_QUALITY],
245
+ ).evaluate(
246
+ model=GenerativeModel("gemini-1.5-pro"),
247
+ prompt_template="{instruction}. Article: {context}. Summary:",
248
+ )
249
+ ```
250
+
251
+ - To perform evaluation with custom model inference, specify the `model`
252
+ parameter with a custom inference function. The input column name to the
253
+ custom inference function is `prompt` and must be present in the dataset.
254
+
255
+ ```python
256
+ from openai import OpenAI
257
+ from vertexai.evaluation import EvalTask, MetricPromptTemplateExamples
258
+
259
+
260
+ client = OpenAI()
261
+ def custom_model_fn(input: str) -> str:
262
+ response = client.chat.completions.create(
263
+ model="gpt-3.5-turbo",
264
+ messages=[
265
+ {"role": "user", "content": input}
266
+ ]
267
+ )
268
+ return response.choices[0].message.content
269
+
270
+ eval_dataset = pd.DataFrame({
271
+ "prompt" : [...],
272
+ "reference": [...],
273
+ })
274
+ result = EvalTask(
275
+ dataset=eval_dataset,
276
+ metrics=[MetricPromptTemplateExamples.Pointwise.SAFETY],
277
+ experiment="my-experiment",
278
+ ).evaluate(
279
+ model=custom_model_fn,
280
+ experiment_run_name="gpt-eval-run"
281
+ )
282
+ ```
283
+
284
+ - To perform pairwise metric evaluation with model inference step, specify
285
+ the `baseline_model` input to a `PairwiseMetric` instance and the candidate
286
+ `model` input to the `EvalTask.evaluate()` function. The input column name
287
+ to both models is `prompt` and must be present in the dataset.
288
+
289
+ ```python
290
+ import pandas as pd
291
+ from vertexai.evaluation import EvalTask, MetricPromptTemplateExamples, PairwiseMetric
292
+ from vertexai.generative_models import GenerativeModel
293
+
294
+ baseline_model = GenerativeModel("gemini-1.0-pro")
295
+ candidate_model = GenerativeModel("gemini-1.5-pro")
296
+
297
+ pairwise_groundedness = PairwiseMetric(
298
+ metric_prompt_template=MetricPromptTemplateExamples.get_prompt_template(
299
+ "pairwise_groundedness"
300
+ ),
301
+ baseline_model=baseline_model,
302
+ )
303
+ eval_dataset = pd.DataFrame({
304
+ "prompt" : [...],
305
+ })
306
+ result = EvalTask(
307
+ dataset=eval_dataset,
308
+ metrics=[pairwise_groundedness],
309
+ experiment="my-pairwise-experiment",
310
+ ).evaluate(
311
+ model=candidate_model,
312
+ experiment_run_name="gemini-pairwise-eval-run",
313
+ )
314
+ ```
315
+
316
+ ## Documentation
317
+
318
+ You can find complete documentation for the Vertex AI SDKs and the Gemini model in the Google Cloud [documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview)
319
+
320
+ ## Contributing
321
+
322
+ See [Contributing](https://github.com/googleapis/python-aiplatform/blob/main/CONTRIBUTING.rst) for more information on contributing to the Vertex AI Python SDK.
323
+
324
+ ## License
325
+
326
+ The contents of this repository are licensed under the [Apache License, version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
testbed/googleapis__python-aiplatform/gemini_docs/conf.py ADDED
@@ -0,0 +1,440 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2021 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ # google-cloud-aiplatform documentation build configuration file
17
+ #
18
+ # This file is execfile()d with the current directory set to its
19
+ # containing dir.
20
+ #
21
+ # Note that not all possible configuration values are present in this
22
+ # autogenerated file.
23
+ #
24
+ # All configuration values have a default; values that are commented out
25
+ # serve to show the default.
26
+
27
+ import sys
28
+ import os
29
+ import shlex
30
+
31
+ # If extensions (or modules to document with autodoc) are in another directory,
32
+ # add these directories to sys.path here. If the directory is relative to the
33
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
34
+ sys.path.insert(0, os.path.abspath(".."))
35
+
36
+ # For plugins that can not read conf.py.
37
+ # See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
38
+ sys.path.insert(0, os.path.abspath("."))
39
+
40
+ __version__ = ""
41
+
42
+ # -- General configuration ------------------------------------------------
43
+
44
+ # If your documentation needs a minimal Sphinx version, state it here.
45
+ needs_sphinx = "1.5.5"
46
+
47
+ # Add any Sphinx extension module names here, as strings. They can be
48
+ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
49
+ # ones.
50
+ extensions = [
51
+ "sphinx.ext.autodoc",
52
+ "sphinx.ext.autosummary",
53
+ "sphinx.ext.intersphinx",
54
+ "sphinx.ext.coverage",
55
+ "sphinx.ext.doctest",
56
+ "sphinx.ext.napoleon",
57
+ "sphinx.ext.todo",
58
+ "sphinx.ext.viewcode",
59
+ "recommonmark",
60
+ ]
61
+
62
+ # autodoc/autosummary flags
63
+ autoclass_content = "both"
64
+ autodoc_default_options = {"members": True}
65
+ autosummary_generate = True
66
+
67
+
68
+ # Add any paths that contain templates here, relative to this directory.
69
+ templates_path = ["_templates"]
70
+
71
+ # The suffix(es) of source filenames.
72
+ # You can specify multiple suffix as a list of string:
73
+ # source_suffix = ['.rst', '.md']
74
+ source_suffix = [".rst", ".md"]
75
+
76
+ # The encoding of source files.
77
+ # source_encoding = 'utf-8-sig'
78
+
79
+ # The root toctree document.
80
+ root_doc = "index"
81
+
82
+ # General information about the project.
83
+ project = "google-cloud-vertexai"
84
+ copyright = "2019, Google"
85
+ author = "Google APIs"
86
+
87
+ # The version info for the project you're documenting, acts as replacement for
88
+ # |version| and |release|, also used in various other places throughout the
89
+ # built documents.
90
+ #
91
+ # The full version, including alpha/beta/rc tags.
92
+ release = __version__
93
+ # The short X.Y version.
94
+ version = ".".join(release.split(".")[0:2])
95
+
96
+ # The language for content autogenerated by Sphinx. Refer to documentation
97
+ # for a list of supported languages.
98
+ #
99
+ # This is also used if you do content translation via gettext catalogs.
100
+ # Usually you set "language" from the command line for these cases.
101
+ language = None
102
+
103
+ # There are two options for replacing |today|: either, you set today to some
104
+ # non-false value, then it is used:
105
+ # today = ''
106
+ # Else, today_fmt is used as the format for a strftime call.
107
+ # today_fmt = '%B %d, %Y'
108
+
109
+ # List of patterns, relative to source directory, that match files and
110
+ # directories to ignore when looking for source files.
111
+ exclude_patterns = [
112
+ "_build",
113
+ "**/.nox/**/*",
114
+ "samples/AUTHORING_GUIDE.md",
115
+ "samples/CONTRIBUTING.md",
116
+ "samples/snippets/README.rst",
117
+ ]
118
+
119
+ # The reST default role (used for this markup: `text`) to use for all
120
+ # documents.
121
+ # default_role = None
122
+
123
+ # If true, '()' will be appended to :func: etc. cross-reference text.
124
+ # add_function_parentheses = True
125
+
126
+ # If true, the current module name will be prepended to all description
127
+ # unit titles (such as .. function::).
128
+ # add_module_names = True
129
+
130
+ # If true, sectionauthor and moduleauthor directives will be shown in the
131
+ # output. They are ignored by default.
132
+ # show_authors = False
133
+
134
+ # The name of the Pygments (syntax highlighting) style to use.
135
+ pygments_style = "sphinx"
136
+
137
+ # A list of ignored prefixes for module index sorting.
138
+ # modindex_common_prefix = []
139
+
140
+ # If true, keep warnings as "system message" paragraphs in the built documents.
141
+ # keep_warnings = False
142
+
143
+ # If true, `todo` and `todoList` produce output, else they produce nothing.
144
+ todo_include_todos = True
145
+
146
+
147
+ # -- Options for HTML output ----------------------------------------------
148
+
149
+ # The theme to use for HTML and HTML Help pages. See the documentation for
150
+ # a list of builtin themes.
151
+ html_theme = "alabaster"
152
+
153
+ # Theme options are theme-specific and customize the look and feel of a theme
154
+ # further. For a list of options available for each theme, see the
155
+ # documentation.
156
+ html_theme_options = {
157
+ "description": "Google Cloud Client Libraries for google-cloud-aiplatform",
158
+ "github_user": "googleapis",
159
+ "github_repo": "python-aiplatform",
160
+ "github_banner": True,
161
+ "font_family": "'Roboto', Georgia, sans",
162
+ "head_font_family": "'Roboto', Georgia, serif",
163
+ "code_font_family": "'Roboto Mono', 'Consolas', monospace",
164
+ }
165
+
166
+ # Add any paths that contain custom themes here, relative to this directory.
167
+ # html_theme_path = []
168
+
169
+ # The name for this set of Sphinx documents. If None, it defaults to
170
+ # "<project> v<release> documentation".
171
+ # html_title = None
172
+
173
+ # A shorter title for the navigation bar. Default is the same as html_title.
174
+ # html_short_title = None
175
+
176
+ # The name of an image file (relative to this directory) to place at the top
177
+ # of the sidebar.
178
+ # html_logo = None
179
+
180
+ # The name of an image file (within the static path) to use as favicon of the
181
+ # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
182
+ # pixels large.
183
+ # html_favicon = None
184
+
185
+ # Add any paths that contain custom static files (such as style sheets) here,
186
+ # relative to this directory. They are copied after the builtin static files,
187
+ # so a file named "default.css" will overwrite the builtin "default.css".
188
+ html_static_path = ["_static"]
189
+
190
+ # Add any extra paths that contain custom files (such as robots.txt or
191
+ # .htaccess) here, relative to this directory. These files are copied
192
+ # directly to the root of the documentation.
193
+ # html_extra_path = []
194
+
195
+ # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
196
+ # using the given strftime format.
197
+ # html_last_updated_fmt = '%b %d, %Y'
198
+
199
+ # If true, SmartyPants will be used to convert quotes and dashes to
200
+ # typographically correct entities.
201
+ # html_use_smartypants = True
202
+
203
+ # Custom sidebar templates, maps document names to template names.
204
+ # html_sidebars = {}
205
+
206
+ # Additional templates that should be rendered to pages, maps page names to
207
+ # template names.
208
+ # html_additional_pages = {}
209
+
210
+ # If false, no module index is generated.
211
+ # html_domain_indices = True
212
+
213
+ # If false, no index is generated.
214
+ # html_use_index = True
215
+
216
+ # If true, the index is split into individual pages for each letter.
217
+ # html_split_index = False
218
+
219
+ # If true, links to the reST sources are added to the pages.
220
+ # html_show_sourcelink = True
221
+
222
+ # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
223
+ # html_show_sphinx = True
224
+
225
+ # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
226
+ # html_show_copyright = True
227
+
228
+ # If true, an OpenSearch description file will be output, and all pages will
229
+ # contain a <link> tag referring to it. The value of this option must be the
230
+ # base URL from which the finished HTML is served.
231
+ # html_use_opensearch = ''
232
+
233
+ # This is the file name suffix for HTML files (e.g. ".xhtml").
234
+ # html_file_suffix = None
235
+
236
+ # Language to be used for generating the HTML full-text search index.
237
+ # Sphinx supports the following languages:
238
+ # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
239
+ # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
240
+ # html_search_language = 'en'
241
+
242
+ # A dictionary with options for the search language support, empty by default.
243
+ # Now only 'ja' uses this config value
244
+ # html_search_options = {'type': 'default'}
245
+
246
+ # The name of a javascript file (relative to the configuration directory) that
247
+ # implements a search results scorer. If empty, the default will be used.
248
+ # html_search_scorer = 'scorer.js'
249
+
250
+ # Output file base name for HTML help builder.
251
+ htmlhelp_basename = "google-cloud-aiplatform-doc"
252
+
253
+ # -- Options for warnings ------------------------------------------------------
254
+
255
+
256
+ suppress_warnings = [
257
+ # Temporarily suppress this to avoid "more than one target found for
258
+ # cross-reference" warning, which are intractable for us to avoid while in
259
+ # a mono-repo.
260
+ # See https://github.com/sphinx-doc/sphinx/blob
261
+ # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
262
+ "ref.python"
263
+ ]
264
+
265
+ # -- Options for LaTeX output ---------------------------------------------
266
+
267
+ latex_elements = {
268
+ # The paper size ('letterpaper' or 'a4paper').
269
+ #'papersize': 'letterpaper',
270
+ # The font size ('10pt', '11pt' or '12pt').
271
+ #'pointsize': '10pt',
272
+ # Additional stuff for the LaTeX preamble.
273
+ #'preamble': '',
274
+ # Latex figure (float) alignment
275
+ #'figure_align': 'htbp',
276
+ }
277
+
278
+ # Grouping the document tree into LaTeX files. List of tuples
279
+ # (source start file, target name, title,
280
+ # author, documentclass [howto, manual, or own class]).
281
+ latex_documents = [
282
+ (
283
+ root_doc,
284
+ "google-cloud-aiplatform.tex",
285
+ "google-cloud-aiplatform Documentation",
286
+ author,
287
+ "manual",
288
+ )
289
+ ]
290
+
291
+ # The name of an image file (relative to this directory) to place at the top of
292
+ # the title page.
293
+ # latex_logo = None
294
+
295
+ # For "manual" documents, if this is true, then toplevel headings are parts,
296
+ # not chapters.
297
+ # latex_use_parts = False
298
+
299
+ # If true, show page references after internal links.
300
+ # latex_show_pagerefs = False
301
+
302
+ # If true, show URL addresses after external links.
303
+ # latex_show_urls = False
304
+
305
+ # Documents to append as an appendix to all manuals.
306
+ # latex_appendices = []
307
+
308
+ # If false, no module index is generated.
309
+ # latex_domain_indices = True
310
+
311
+
312
+ # -- Options for manual page output ---------------------------------------
313
+
314
+ # One entry per manual page. List of tuples
315
+ # (source start file, name, description, authors, manual section).
316
+ man_pages = [
317
+ (
318
+ root_doc,
319
+ "google-cloud-aiplatform",
320
+ "google-cloud-aiplatform Documentation",
321
+ [author],
322
+ 1,
323
+ )
324
+ ]
325
+
326
+ # If true, show URL addresses after external links.
327
+ # man_show_urls = False
328
+
329
+
330
+ # -- Options for Texinfo output -------------------------------------------
331
+
332
+ # Grouping the document tree into Texinfo files. List of tuples
333
+ # (source start file, target name, title, author,
334
+ # dir menu entry, description, category)
335
+ texinfo_documents = [
336
+ (
337
+ root_doc,
338
+ "google-cloud-aiplatform",
339
+ "google-cloud-aiplatform Documentation",
340
+ author,
341
+ "google-cloud-aiplatform",
342
+ "google-cloud-aiplatform Library",
343
+ "APIs",
344
+ )
345
+ ]
346
+
347
+ # Documents to append as an appendix to all manuals.
348
+ # texinfo_appendices = []
349
+
350
+ # If false, no module index is generated.
351
+ # texinfo_domain_indices = True
352
+
353
+ # How to display URL addresses: 'footnote', 'no', or 'inline'.
354
+ # texinfo_show_urls = 'footnote'
355
+
356
+ # If true, do not generate a @detailmenu in the "Top" node's menu.
357
+ # texinfo_no_detailmenu = False
358
+
359
+
360
+ # Example configuration for intersphinx: refer to the Python standard library.
361
+ intersphinx_mapping = {
362
+ "python": ("https://python.readthedocs.org/en/latest/", None),
363
+ "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
364
+ "google.api_core": (
365
+ "https://googleapis.dev/python/google-api-core/latest/",
366
+ None,
367
+ ),
368
+ "grpc": ("https://grpc.github.io/grpc/python/", None),
369
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
370
+ "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
371
+ }
372
+
373
+
374
+ # Napoleon settings
375
+ napoleon_google_docstring = True
376
+ napoleon_numpy_docstring = True
377
+ napoleon_include_private_with_doc = False
378
+ napoleon_include_special_with_doc = True
379
+ napoleon_use_admonition_for_examples = False
380
+ napoleon_use_admonition_for_notes = False
381
+ napoleon_use_admonition_for_references = False
382
+ napoleon_use_ivar = False
383
+ napoleon_use_param = True
384
+ napoleon_use_rtype = True
385
+
386
+
387
+ def adopt_members_reexported_from_private_modules(public_module: str):
388
+ """Remaps the module items that come from internal modules.
389
+
390
+ A public module might be exporting items that are imported from private modules.
391
+ This function changes the `__module__` of such items to the public module.
392
+
393
+ Example:
394
+ `package/public.py`:
395
+
396
+ ```
397
+ from package._private import _PrivateClass as PublicClass
398
+ __all__ = ["PublicClass"]
399
+ ```
400
+
401
+ Calling this function on the `package.public` module will change:
402
+ ```
403
+ package._private._PrivateClass.__name__ = "PublicClass"
404
+ package._private._PrivateClass.__module__ = "package.public"
405
+ ```
406
+ """
407
+ for name, cls in public_module.__dict__.items():
408
+ if name in public_module.__all__:
409
+ if "._" in cls.__module__:
410
+ cls.__name__ = name
411
+ cls.__module__ = public_module.__name__
412
+
413
+
414
+ def setup(*args, **kwargs):
415
+ # 1. Giving pretty module names to the GA and preview classes
416
+ # 2. Giving pretty class names to the preview classes
417
+ # 3. Making Sphinx automodule render the class members instead of
418
+ # dismissing the exported private classes as "Alias of".
419
+ from vertexai import evaluation
420
+ from vertexai import language_models
421
+ from vertexai import vision_models
422
+ from vertexai.preview import (
423
+ language_models as preview_language_models,
424
+ )
425
+ from vertexai.preview import (
426
+ vision_models as preview_vision_models,
427
+ )
428
+
429
+ # There are many possible ways to select which classes to fix.
430
+ # We select the publicly exported members that have an internal module ("*._*").
431
+
432
+ # Setting the modules of the GA classes
433
+ adopt_members_reexported_from_private_modules(evaluation)
434
+ adopt_members_reexported_from_private_modules(language_models)
435
+ adopt_members_reexported_from_private_modules(vision_models)
436
+
437
+ # Setting the modules of the public preview classes
438
+ # Selecting the members that still have an internal module after the GA fixes.
439
+ adopt_members_reexported_from_private_modules(preview_language_models)
440
+ adopt_members_reexported_from_private_modules(preview_vision_models)
testbed/googleapis__python-aiplatform/gemini_docs/index.rst ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ .. include:: README.md
2
+
3
+ API Reference
4
+ -------------
5
+ .. toctree::
6
+ :maxdepth: 2
7
+
8
+ vertexai/vertexai
testbed/googleapis__python-aiplatform/gemini_docs/vertexai/vertexai.rst ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Vertex AI SDK
2
+ =============================================
3
+
4
+ .. automodule:: vertexai
5
+ :members:
6
+ :show-inheritance:
7
+ :inherited-members:
8
+
9
+ .. automodule:: vertexai.generative_models
10
+ :members:
11
+ :show-inheritance:
12
+ :inherited-members:
13
+
14
+ .. automodule:: vertexai.preview.generative_models
15
+ :members:
16
+ :show-inheritance:
17
+ :inherited-members:
18
+
19
+ .. automodule:: vertexai.preview.prompts
20
+ :members:
21
+ :show-inheritance:
22
+ :inherited-members:
23
+
24
+ .. automodule:: vertexai.prompts._prompts
25
+ :members:
26
+ :show-inheritance:
27
+ :inherited-members:
28
+
29
+ .. automodule:: vertexai.prompts._prompt_management
30
+ :members:
31
+ :show-inheritance:
32
+ :inherited-members:
33
+
34
+ .. automodule:: vertexai.language_models
35
+ :members:
36
+ :show-inheritance:
37
+ :inherited-members:
38
+
39
+ .. automodule:: vertexai.language_models._language_models
40
+ :no-members:
41
+ :private-members: _TunableModelMixin
42
+
43
+ .. automodule:: vertexai.preview
44
+ :members:
45
+ :show-inheritance:
46
+ :inherited-members:
47
+
48
+ .. automodule:: vertexai.preview.language_models
49
+ :members:
50
+ :show-inheritance:
51
+ :inherited-members:
52
+
53
+ .. automodule:: vertexai.vision_models
54
+ :members:
55
+ :show-inheritance:
56
+ :inherited-members:
57
+
58
+ .. automodule:: vertexai.preview.vision_models
59
+ :members:
60
+ :show-inheritance:
61
+ :inherited-members:
62
+
63
+ .. automodule:: vertexai.preview.tuning
64
+ :members:
65
+ :show-inheritance:
66
+ :inherited-members:
67
+
68
+ .. automodule:: vertexai.preview.tuning.sft
69
+ :members:
70
+ :show-inheritance:
71
+ :inherited-members:
72
+
73
+ .. automodule:: vertexai.evaluation
74
+ :members:
75
+ :show-inheritance:
76
+ :inherited-members:
77
+
78
+ .. automodule:: vertexai.preview.reasoning_engines
79
+ :members:
80
+ :show-inheritance:
81
+ :inherited-members:
82
+
83
+ .. automodule:: vertexai.resources
84
+ :no-members:
85
+
86
+ .. automodule:: vertexai.resources.preview
87
+ :no-members:
88
+
89
+ .. automodule:: vertexai.resources.preview.ml_monitoring
90
+ :members:
91
+ :show-inheritance:
92
+ :inherited-members:
93
+
94
+ .. automodule:: vertexai.resources.preview.ml_monitoring.spec
95
+ :members:
96
+ :show-inheritance:
97
+ :inherited-members:
testbed/googleapis__python-aiplatform/mypy.ini ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [mypy]
2
+ python_version = 3.7
3
+ namespace_packages = True
testbed/googleapis__python-aiplatform/noxfile.py ADDED
@@ -0,0 +1,591 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright 2018 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # https://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Generated by synthtool. DO NOT EDIT!
18
+
19
+ from __future__ import absolute_import
20
+ import os
21
+ import pathlib
22
+ import re
23
+ import shutil
24
+ import warnings
25
+
26
+ import nox
27
+
28
# Pinned formatter/linter versions so lint sessions are reproducible across CI runs.
FLAKE8_VERSION = "flake8==6.1.0"
BLACK_VERSION = "black==22.3.0"
ISORT_VERSION = "isort==5.10.1"
# Paths checked/rewritten by the lint, blacken and format sessions.
LINT_PATHS = ["docs", "google", "vertexai", "tests", "noxfile.py", "setup.py"]

# Interpreter used for sessions that do not need a version matrix.
DEFAULT_PYTHON_VERSION = "3.8"

# Requirements for building the Sphinx HTML docs ('docs' / 'gemini_docs' sessions).
DOCS_DEPENDENCIES = (
    "sphinx==5.0.2",
    "alabaster",
    "google-cloud-aiplatform[evaluation]",
    "recommonmark",
)

# Requirements for generating docfx YAML ('docfx' / 'gemini_docfx' sessions).
# The sphinxcontrib pins keep compatibility with gcp-sphinx-docfx-yaml.
DOCFX_DEPENDENCIES = (
    "gcp-sphinx-docfx-yaml",
    "sphinxcontrib-applehelp==1.0.4",
    "sphinxcontrib-devhelp==1.0.2",
    "sphinxcontrib-htmlhelp==2.0.1",
    "sphinxcontrib-qthelp==1.0.3",
    "sphinxcontrib-serializinghtml==1.1.5",
    "alabaster",
    "google-cloud-aiplatform[evaluation]",
    "recommonmark",
)

# Python version matrices for the unit-test sessions.
UNIT_TEST_PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11", "3.12"]
UNIT_TEST_LANGCHAIN_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12"]
UNIT_TEST_STANDARD_DEPENDENCIES = [
    "mock",
    "asyncmock",
    "pytest",
    "pytest-cov",
    "pytest-asyncio",
    # Preventing: py.test: error: unrecognized arguments: -n=auto --dist=loadscope
    "pytest-xdist",
]
# The following knobs are synthtool template hooks; empty here but consumed
# by install_unittest_dependencies().
UNIT_TEST_EXTERNAL_DEPENDENCIES = []
UNIT_TEST_LOCAL_DEPENDENCIES = []
UNIT_TEST_DEPENDENCIES = []
UNIT_TEST_EXTRAS = [
    "testing",
]
UNIT_TEST_EXTRAS_BY_PYTHON = {}

# System-test equivalents of the knobs above, consumed by
# install_systemtest_dependencies().
SYSTEM_TEST_PYTHON_VERSIONS = ["3.10"]
SYSTEM_TEST_STANDARD_DEPENDENCIES = [
    "mock",
    "pytest",
    "google-cloud-testutils",
]
SYSTEM_TEST_EXTERNAL_DEPENDENCIES = []
SYSTEM_TEST_LOCAL_DEPENDENCIES = []
SYSTEM_TEST_DEPENDENCIES = []
SYSTEM_TEST_EXTRAS = [
    "testing",
]
SYSTEM_TEST_EXTRAS_BY_PYTHON = {}

# Absolute path of the repo root (the directory containing this noxfile).
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()

# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
    "unit",
    "unit_ray",
    "unit_langchain",
    "system",
    "cover",
    "lint",
    "lint_setup_py",
    "blacken",
    "docs",
]

# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
104
+
105
+
106
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
    """Run linters.

    Returns a failure if the linters find linting errors or sufficiently
    serious code quality issues.
    """
    session.install(FLAKE8_VERSION, BLACK_VERSION)
    # `black --check --diff` reports formatting drift without rewriting files.
    session.run("black", "--check", "--diff", *LINT_PATHS)
    session.run("flake8", *LINT_PATHS)
121
+
122
+
123
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
    """Run black. Format code to uniform standard."""
    session.install(BLACK_VERSION)
    # Unlike `lint`, this rewrites the files in place.
    session.run("black", *LINT_PATHS)
131
+
132
+
133
@nox.session(python=DEFAULT_PYTHON_VERSION)
def format(session):
    """
    Run isort to sort imports. Then run black
    to format code to uniform standard.
    """
    session.install(BLACK_VERSION, ISORT_VERSION)
    # Use the --fss option to sort imports using strict alphabetical order.
    # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
    session.run("isort", "--fss", *LINT_PATHS)
    session.run("black", *LINT_PATHS)
151
+
152
+
153
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
    """Verify that setup.py is valid (including RST check)."""
    # docutils/pygments are needed for the --restructuredtext long-description check.
    tools = ("docutils", "pygments")
    session.install(*tools)
    session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
158
+
159
+
160
def install_unittest_dependencies(session, *constraints):
    """Install unit-test requirements plus this package (editable) into *session*.

    *constraints* is forwarded verbatim to every pip invocation (typically a
    ``("-c", path)`` pair pointing at a constraints file).
    """
    session.install(
        *(UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES), *constraints
    )

    if UNIT_TEST_EXTERNAL_DEPENDENCIES:
        warnings.warn(
            "'unit_test_external_dependencies' is deprecated. Instead, please "
            "use 'unit_test_dependencies' or 'unit_test_local_dependencies'.",
            DeprecationWarning,
        )
        session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints)

    if UNIT_TEST_LOCAL_DEPENDENCIES:
        session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints)

    # A per-Python mapping, when configured, takes precedence over the
    # global extras list.
    if UNIT_TEST_EXTRAS_BY_PYTHON:
        extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
    else:
        extras = UNIT_TEST_EXTRAS or []

    install_target = f".[{','.join(extras)}]" if extras else "."
    session.install("-e", install_target, *constraints)
186
+
187
+
188
def default(session):
    """Install all test dependencies, then run the unit tests in-place."""
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    install_unittest_dependencies(session, "-c", constraints_path)

    coverage_flags = [
        "--cov=google",
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=0",
    ]
    # ray/langchain have dedicated sessions, and the architecture tests need
    # an isolated interpreter (run separately below).
    ignore_flags = [
        "--ignore=tests/unit/vertex_ray",
        "--ignore=tests/unit/vertex_langchain",
        "--ignore=tests/unit/architecture",
    ]

    # Main unit-test run.
    session.run(
        "py.test",
        "--quiet",
        f"--junitxml=unit_{session.python}_sponge_log.xml",
        *coverage_flags,
        *ignore_flags,
        os.path.join("tests", "unit"),
        *session.posargs,
    )

    # Run tests that require isolation.
    session.run(
        "py.test",
        "--quiet",
        f"--junitxml=unit_{session.python}_test_vertexai_import_sponge_log.xml",
        os.path.join("tests", "unit", "architecture", "test_vertexai_import.py"),
        *session.posargs,
    )
221
+
222
+
223
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
    """Run the unit test suite."""
    # Run the GenAI tests against the minimal install first, before the
    # testing extras are added to the session's virtualenv.
    unit_genai_minimal_dependencies(session)
    # Then the default full test suite.
    default(session)
231
+
232
+
233
def unit_genai_minimal_dependencies(session):
    """Run the GenAI unit tests with only the minimal dependencies installed."""
    # Install minimal test dependencies, then this package in-place (no extras).
    session.install(*UNIT_TEST_STANDARD_DEPENDENCIES, *UNIT_TEST_DEPENDENCIES)
    session.install("-e", ".")

    session.run(
        "py.test",
        "--quiet",
        f"--junitxml=unit_{session.python}_sponge_log.xml",
        # These tests require the PIL module
        # "--ignore=TestGenerativeModels::test_image_mime_types",
        os.path.join("tests", "unit", "vertexai", "test_generative_models.py"),
        *session.posargs,
    )
250
+
251
+
252
@nox.session(python="3.10")
@nox.parametrize("ray", ["2.9.3", "2.33.0"])
def unit_ray(session, ray):
    """Run the Ray unit tests against a pinned Ray release."""
    constraints_path = str(CURRENT_DIRECTORY / "testing" / f"constraints-ray-{ray}.txt")
    session.install(
        *UNIT_TEST_STANDARD_DEPENDENCIES,
        *UNIT_TEST_DEPENDENCIES,
        "-c",
        constraints_path,
    )
    # The ray_testing extra resolves to the Ray version pinned by the
    # constraints file above.
    session.install("-e", ".[ray_testing]", "-c", constraints_path)

    session.run(
        "py.test",
        "--quiet",
        f"--junitxml=unit_ray_{ray}_sponge_log.xml",
        "--cov=google",
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=0",
        os.path.join("tests", "unit", "vertex_ray"),
        *session.posargs,
    )
277
+
278
+
279
@nox.session(python=UNIT_TEST_LANGCHAIN_PYTHON_VERSIONS)
def unit_langchain(session):
    """Run the LangChain unit tests under the langchain constraints file."""
    constraints_path = str(CURRENT_DIRECTORY / "testing" / "constraints-langchain.txt")
    session.install(
        *UNIT_TEST_STANDARD_DEPENDENCIES,
        *UNIT_TEST_DEPENDENCIES,
        "-c",
        constraints_path,
    )
    # Install this package in-place with the langchain testing extras.
    session.install("-e", ".[langchain_testing]", "-c", constraints_path)

    session.run(
        "py.test",
        "--quiet",
        "--junitxml=unit_langchain_sponge_log.xml",
        "--cov=google",
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=0",
        os.path.join("tests", "unit", "vertex_langchain"),
        *session.posargs,
    )
303
+
304
+
305
def install_systemtest_dependencies(session, *constraints):
    """Install system-test requirements plus this package into *session*.

    *constraints* is forwarded verbatim to every pip invocation.
    """
    # Use pre-release gRPC for system tests.
    # Exclude version 1.52.0rc1 which has a known issue.
    # See https://github.com/grpc/grpc/issues/32163
    session.install("--pre", "grpcio!=1.52.0rc1")

    session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints)

    if SYSTEM_TEST_EXTERNAL_DEPENDENCIES:
        session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints)

    if SYSTEM_TEST_LOCAL_DEPENDENCIES:
        session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints)

    if SYSTEM_TEST_DEPENDENCIES:
        session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints)

    # A per-Python mapping, when configured, takes precedence over the
    # global extras list.
    if SYSTEM_TEST_EXTRAS_BY_PYTHON:
        extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
    else:
        extras = SYSTEM_TEST_EXTRAS or []

    install_target = f".[{','.join(extras)}]" if extras else "."
    session.install("-e", install_target, *constraints)
333
+
334
+
335
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
    """Run the system test suite."""
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    single_file = os.path.join("tests", "system.py")
    test_folder = os.path.join("tests", "system")

    # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
    if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
        session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
    # Install pyopenssl for mTLS testing.
    if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
        session.install("pyopenssl")

    have_single_file = os.path.exists(single_file)
    have_folder = os.path.exists(test_folder)
    # Sanity check: only run tests if found.
    if not (have_single_file or have_folder):
        session.skip("System tests were not found")

    install_systemtest_dependencies(session, "-c", constraints_path)

    # Run py.test against whichever layout(s) exist.
    if have_single_file:
        session.run(
            "py.test",
            "--quiet",
            f"--junitxml=system_{session.python}_sponge_log.xml",
            single_file,
            *session.posargs,
        )
    if have_folder:
        session.run(
            "py.test",
            "-v",
            f"--junitxml=system_{session.python}_sponge_log.xml",
            test_folder,
            *session.posargs,
        )
376
+
377
+
378
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
    """Run the final coverage report.

    This outputs the coverage report aggregating coverage from the unit
    test runs (not system test runs), and then erases coverage data.
    """
    session.install("coverage", "pytest-cov")
    # Fails the session if aggregate line coverage is below 85%.
    session.run("coverage", "report", "--show-missing", "--fail-under=85")
    session.run("coverage", "erase")
389
+
390
+
391
@nox.session(python="3.9")
def docs(session):
    """Build the docs for this library."""
    session.install("-e", ".")
    session.install(*DOCS_DEPENDENCIES, "google-cloud-aiplatform[prediction]")

    build_root = os.path.join("docs", "_build")
    # Start from a clean output tree so removed pages don't linger.
    shutil.rmtree(build_root, ignore_errors=True)
    session.run(
        "sphinx-build",
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join(build_root, "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join(build_root, "html", ""),
    )
413
+
414
+
415
@nox.session(python="3.10")
def docfx(session):
    """Build the docfx yaml files for this library."""
    session.install("-e", ".")
    session.install(*DOCFX_DEPENDENCIES, "google-cloud-aiplatform[prediction]")

    build_root = os.path.join("docs", "_build")
    shutil.rmtree(build_root, ignore_errors=True)
    # -D overrides conf.py's extension list so docfx_yaml runs alongside the
    # standard autodoc stack.
    session.run(
        "sphinx-build",
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-D",
        (
            "extensions=sphinx.ext.autodoc,"
            "sphinx.ext.autosummary,"
            "docfx_yaml.extension,"
            "sphinx.ext.intersphinx,"
            "sphinx.ext.coverage,"
            "sphinx.ext.napoleon,"
            "sphinx.ext.todo,"
            "sphinx.ext.viewcode,"
            "recommonmark"
        ),
        "-b",
        "html",
        "-d",
        os.path.join(build_root, "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join(build_root, "html", ""),
    )
449
+
450
+
451
@nox.session(python="3.9")
def gemini_docs(session):
    """Build the docs for library related to Gemini."""
    session.install("-e", ".")
    session.install(*DOCS_DEPENDENCIES)

    # Fix: clean *this session's* output tree. The previous code removed
    # docs/_build, which this session never writes to, so stale Gemini
    # doctrees/html under gemini_docs/_build were never cleared.
    shutil.rmtree(os.path.join("gemini_docs", "_build"), ignore_errors=True)
    session.run(
        "sphinx-build",
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join("gemini_docs", "_build", "doctrees", ""),
        os.path.join("gemini_docs", ""),
        os.path.join("gemini_docs", "_build", "html", ""),
    )
470
+
471
+
472
@nox.session(python="3.10")
def gemini_docfx(session):
    """Build the docfx yaml files for library related to Gemini."""
    session.install("-e", ".")
    session.install(*DOCFX_DEPENDENCIES)

    # Fix: clean *this session's* output tree. The previous code removed
    # docs/_build, which this session never writes to, so stale output under
    # gemini_docs/_build was never cleared between builds.
    shutil.rmtree(os.path.join("gemini_docs", "_build"), ignore_errors=True)
    # -D overrides conf.py's extension list so docfx_yaml runs alongside the
    # standard autodoc stack.
    session.run(
        "sphinx-build",
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-D",
        (
            "extensions=sphinx.ext.autodoc,"
            "sphinx.ext.autosummary,"
            "docfx_yaml.extension,"
            "sphinx.ext.intersphinx,"
            "sphinx.ext.coverage,"
            "sphinx.ext.napoleon,"
            "sphinx.ext.todo,"
            "sphinx.ext.viewcode,"
            "recommonmark"
        ),
        "-b",
        "html",
        "-d",
        os.path.join("gemini_docs", "_build", "doctrees", ""),
        os.path.join("gemini_docs", ""),
        os.path.join("gemini_docs", "_build", "html", ""),
    )
503
+
504
+
505
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def prerelease_deps(session):
    """Run all tests with prerelease versions of dependencies installed."""

    # Install all dependencies
    session.install("-e", ".[all, tests, tracing]")
    unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES
    session.install(*unit_deps_all)
    system_deps_all = (
        SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES
    )
    session.install(*system_deps_all)

    # Because we test minimum dependency versions on the minimum Python
    # version, the first version we test with in the unit tests sessions has a
    # constraints file containing all dependencies and extras.
    with open(
        CURRENT_DIRECTORY
        / "testing"
        / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt",
        encoding="utf-8",
    ) as constraints_file:
        constraints_text = constraints_file.read()

    # Ignore leading whitespace and comment lines.
    # The lookahead keeps only `pkg==ver` pins and extracts the bare package name.
    constraints_deps = [
        match.group(1)
        for match in re.finditer(
            r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE
        )
    ]

    session.install(*constraints_deps)

    prerel_deps = [
        "protobuf",
        # dependency of grpc
        "six",
        "googleapis-common-protos",
        # Exclude version 1.52.0rc1 which has a known issue. See https://github.com/grpc/grpc/issues/32163
        "grpcio!=1.52.0rc1",
        "grpcio-status",
        "google-api-core",
        "proto-plus",
        "google-cloud-testutils",
        # dependencies of google-cloud-testutils"
        "click",
    ]

    # Upgrade each core dependency to its latest prerelease; --no-deps keeps
    # pip from dragging that package's stable dependencies back in.
    for dep in prerel_deps:
        session.install("--pre", "--no-deps", "--upgrade", dep)

    # Remaining dependencies
    other_deps = [
        "requests",
        "google-auth",
    ]
    session.install(*other_deps)

    # Print out prerelease package versions
    session.run(
        "python", "-c", "import google.protobuf; print(google.protobuf.__version__)"
    )
    session.run("python", "-c", "import grpc; print(grpc.__version__)")

    session.run("py.test", "tests/unit")

    system_test_path = os.path.join("tests", "system.py")
    system_test_folder_path = os.path.join("tests", "system")

    # Only run system tests if found.
    if os.path.exists(system_test_path):
        session.run(
            "py.test",
            "--verbose",
            f"--junitxml=system_{session.python}_sponge_log.xml",
            system_test_path,
            *session.posargs,
        )
    if os.path.exists(system_test_folder_path):
        session.run(
            "py.test",
            "--verbose",
            f"--junitxml=system_{session.python}_sponge_log.xml",
            system_test_folder_path,
            *session.posargs,
        )
testbed/googleapis__python-aiplatform/owlbot.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script is used to synthesize generated parts of this library."""

import re

import synthtool as s
import synthtool.gcp as gcp
from synthtool.languages import python

common = gcp.CommonTemplates()

default_version = "v1"

# Tracks whether any staging directory produced generated changes; the
# post-processing below only runs when one did.
has_generator_updates = False
for library in s.get_staging_dirs(default_version):
    # ---------------------------------------------------------------------
    # Patch each version of the library
    # ---------------------------------------------------------------------

    # https://github.com/googleapis/gapic-generator-python/issues/413
    s.replace(
        library
        / f"google/cloud/aiplatform_{library.name}/services/prediction_service/client.py",
        "request.instances = instances",
        "request.instances.extend(instances)",
    )

    # Remove test_predict_flattened/test_predict_flattened_async due to gapic generator bug
    # https://github.com/googleapis/gapic-generator-python/issues/414
    s.replace(
        library
        / f"tests/unit/gapic/aiplatform_{library.name}/test_prediction_service.py",
        """def test_predict_flattened.*?def test_predict_flattened_error""",
        "def test_predict_flattened_error",
        flags=re.MULTILINE | re.DOTALL,
    )

    # Remove test_explain_flattened/test_explain_flattened_async due to gapic generator bug
    # https://github.com/googleapis/gapic-generator-python/issues/414
    s.replace(
        library
        / f"tests/unit/gapic/aiplatform_{library.name}/test_prediction_service.py",
        """def test_explain_flattened.*?def test_explain_flattened_error""",
        "def test_explain_flattened_error",
        flags=re.MULTILINE | re.DOTALL,
    )

    # Copy the generated library into the repo, excluding files maintained
    # by hand in this repository.
    s.move(
        library,
        excludes=[
            ".coveragerc",
            ".pre-commit-config.yaml",
            "setup.py",
            "README.rst",
            "docs/index.rst",
            "docs/summary_overview.md",
            f"docs/definition_{library.name}/services.rst",
            f"docs/instance_{library.name}/services.rst",
            f"docs/params_{library.name}/services.rst",
            f"docs/prediction_{library.name}/services.rst",
            f"scripts/fixup_aiplatform_{library.name}_keywords.py",
            f"scripts/fixup_definition_{library.name}_keywords.py",
            f"scripts/fixup_instance_{library.name}_keywords.py",
            f"scripts/fixup_params_{library.name}_keywords.py",
            f"scripts/fixup_prediction_{library.name}_keywords.py",
            "google/cloud/aiplatform/__init__.py",
            f"google/cloud/aiplatform/{library.name}/schema/**/services/",
            "**/gapic_version.py",  # exclude gapic_version.py to avoid reverting the version to 0.1.0
            ".kokoro/samples",
            "noxfile.py",
            "testing",
            "docs/conf.py",
        ],
    )
    has_generator_updates = True

s.remove_staging_dirs()

# only run post processor when there are changes to the generated code
if has_generator_updates:
    # ----------------------------------------------------------------------------
    # Add templated files
    # ----------------------------------------------------------------------------

    templated_files = common.py_library(
        cov_level=98,
        system_test_python_versions=["3.8"],
        unit_test_python_versions=["3.8", "3.9", "3.10", "3.11", "3.12"],
        unit_test_extras=["testing"],
        system_test_extras=["testing"],
        microgenerator=True,
    )
    s.move(
        templated_files,
        excludes=[
            ".coveragerc",
            ".pre-commit-config.yaml",
            ".kokoro/continuous/common.cfg",
            ".kokoro/presubmit/presubmit.cfg",
            ".kokoro/continuous/prerelease-deps.cfg",
            ".kokoro/presubmit/prerelease-deps.cfg",
            ".kokoro/docs/docs-presubmit.cfg",
            ".kokoro/build.sh",
            ".kokoro/release.sh",
            ".kokoro/release/common.cfg",
            ".kokoro/requirements*",
            # exclude sample configs so periodic samples are tested against main
            # instead of pypi
            ".kokoro/samples/python3.7/common.cfg",
            ".kokoro/samples/python3.8/common.cfg",
            ".kokoro/samples/python3.9/common.cfg",
            ".kokoro/samples/python3.10/common.cfg",
            ".kokoro/samples/python3.11/common.cfg",
            ".kokoro/samples/python3.12/common.cfg",
            ".kokoro/samples/python3.7/periodic.cfg",
            ".kokoro/samples/python3.8/periodic.cfg",
            ".kokoro/samples/python3.9/periodic.cfg",
            ".kokoro/samples/python3.10/periodic.cfg",
            ".kokoro/samples/python3.11/periodic.cfg",
            ".kokoro/samples/python3.12/periodic.cfg",
            ".github/CODEOWNERS",
            ".github/PULL_REQUEST_TEMPLATE.md",
            ".github/workflows",  # exclude gh actions as credentials are needed for tests
            "README.rst",
            ".github/release-please.yml",  # use release please manifest
            "noxfile.py",
            "testing",
            "docs/conf.py",
        ],
    )  # the microgenerator has a good coveragerc file

    python.py_samples(skip_readmes=True)

    python.configure_previous_major_version_branches()

    # Update samples config to use `ucaip-sample-tests` project
    s.replace(
        ".kokoro/samples/python3.*/common.cfg",
        """env_vars: \{
key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
value: "python-docs-samples-tests-.*?"
\}""",
        """env_vars: {
key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
value: "ucaip-sample-tests"
}""",
    )

    # NOTE(review): the sample runner is pinned to the default python3 on the
    # test image rather than a specific minor version.
    s.replace(
        ".kokoro/test-samples-impl.sh",
        "python3.9",
        "python3",
    )

    # Update publish-docs to include gemini docs workflow.
    s.replace(
        ".kokoro/publish-docs.sh",
        "# build docs",
        """\
# build Gemini docs
nox -s gemini_docs
# create metadata
python3 -m docuploader create-metadata \\
--name="vertexai" \\
--version=$(python3 setup.py --version) \\
--language=$(jq --raw-output '.language // empty' .repo-metadata.json) \\
--distribution-name="google-cloud-vertexai" \\
--product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \\
--github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \\
--issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
cat docs.metadata
# upload docs
python3 -m docuploader upload gemini_docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}"
# Gemini docfx yaml files
nox -s gemini_docfx
# create metadata.
python3 -m docuploader create-metadata \\
--name="vertexai" \\
--version=$(python3 setup.py --version) \\
--language=$(jq --raw-output '.language // empty' .repo-metadata.json) \\
--distribution-name="google-cloud-vertexai" \\
--product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \\
--github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \\
--issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) \\
--stem="/vertex-ai/generative-ai/docs/reference/python"
cat docs.metadata
# upload docs
python3 -m docuploader upload gemini_docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}"
# build docs""",
    )

    # Re-format everything owlbot touched so the repo stays black-clean.
    s.shell.run(["nox", "-s", "blacken"], hide_output=False)
testbed/googleapis__python-aiplatform/pypi/README.md ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # vertexai package
2
+
3
+ The pypi package in the `_vertex_ai_placeholder` directory is being used to
4
+ upload a package with `vertexai` namespace on PyPI, which depends on `google-cloud-aiplatform`.
testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/LICENSE ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/README.md ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ vertexai
2
+ ========
3
+
4
+ To use the Vertex GAPIC clients, please install the `google-cloud-aiplatform` PyPI package by running `pip install google-cloud-aiplatform`.
5
+
6
+ To use the Vertex AI SDK, please install the `vertexai` PyPI package by running `pip install vertexai`.
testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/pyproject.toml ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [build-system]
2
+ requires = ["setuptools>=61.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "vertexai"
7
+ dynamic = ["version", "dependencies", "optional-dependencies"]
8
+ authors = [
9
+ { name="Google LLC", email="googleapis-packages@google.com" },
10
+ ]
11
+ license = {text = "Apache 2.0"}
12
+ description = "Please run pip install vertexai to use the Vertex SDK."
13
+ readme = "README.md"
14
+ requires-python = ">=3.8"
15
+
16
+ [project.urls]
17
+ repository = "https://github.com/googleapis/python-aiplatform.git"
testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/setup.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-

# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Packaging script for the `vertexai` placeholder distribution. The built
# package carries no code of its own: its sole runtime dependency is
# `google-cloud-aiplatform[all]` pinned to the exact same version (see
# `install_requires` below), so `pip install vertexai` pulls in the real SDK.
# The extras lists mirror the extras of the main google-cloud-aiplatform
# setup.py so that e.g. `vertexai[ray]` resolves like the upstream extra.

import io
import os

import setuptools  # type: ignore

name = "vertexai"
description = "Vertex AI API client library"

# Long description comes from the adjacent README.md of this placeholder.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.md")
with io.open(readme_filename, encoding="utf-8") as readme_file:
    readme = readme_file.read()

# Execute the sibling version.py to obtain __version__ without importing a
# package (there is no importable package in this directory).
version = {}
with open(os.path.join(package_root, "version.py")) as fp:
    exec(fp.read(), version)
version = version["__version__"]

# ---- Optional-dependency (extras) groups, mirroring the main setup.py ----

tensorboard_extra_require = ["tensorflow >=2.3.0, <3.0.0dev; python_version<='3.11'"]
metadata_extra_require = ["pandas >= 1.0.0", "numpy>=1.15.0"]
xai_extra_require = ["tensorflow >=2.3.0, <3.0.0dev"]
lit_extra_require = [
    "tensorflow >= 2.3.0, <3.0.0dev",
    "pandas >= 1.0.0",
    "lit-nlp == 0.4.0",
    "explainable-ai-sdk >= 1.0.0",
]
profiler_extra_require = [
    "tensorboard-plugin-profile >= 2.4.0, <3.0.0dev",
    "werkzeug >= 2.0.0, <2.1.0dev",
    "tensorflow >=2.4.0, <3.0.0dev",
]
featurestore_extra_require = [
    "google-cloud-bigquery-storage",
    "pandas >= 1.0.0",
    "pyarrow >= 6.0.1",
]
pipelines_extra_require = [
    "pyyaml>=5.3.1,<7",
]
datasets_extra_require = [
    "pyarrow >= 3.0.0, < 8.0dev; python_version<'3.11'",
    "pyarrow >= 10.0.1; python_version=='3.11'",
    "pyarrow >= 14.0.0; python_version>='3.12'",
]

vizier_extra_require = [
    "google-vizier>=0.1.6",
]

prediction_extra_require = [
    "docker >= 5.0.3",
    "fastapi >= 0.71.0, <=0.109.1",
    "httpx >=0.23.0, <0.25.0",  # Optional dependency of fastapi
    "starlette >= 0.17.1",
    "uvicorn[standard] >= 0.16.0",
]

endpoint_extra_require = ["requests >= 2.28.1"]

private_endpoints_extra_require = [
    "urllib3 >=1.21.1, <1.27",
    "requests >= 2.28.1",
]

autologging_extra_require = ["mlflow>=1.27.0,<=2.1.1"]

preview_extra_require = [
    "cloudpickle < 3.0",
    "google-cloud-logging < 4.0",
]

ray_extra_require = [
    # Cluster only supports 2.4.0 and 2.9.3
    (
        "ray[default] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!="
        " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2; python_version<'3.11'"
    ),
    # Ray Data v2.4 in Python 3.11 is broken, but got fixed in Ray v2.5.
    "ray[default] >= 2.5, <= 2.9.3; python_version=='3.11'",
    "google-cloud-bigquery-storage",
    "google-cloud-bigquery",
    "pandas >= 1.0.0, < 2.2.0",
    "pyarrow >= 6.0.1",
    # Workaround for https://github.com/ray-project/ray/issues/36990.
    # TODO(b/295406381): Remove this pin when we drop support of ray<=2.5.
    "pydantic < 2",
    "immutabledict",
]

# NOTE(review): genai_requires is defined but never referenced in this file
# (neither in an extras group nor in setup() below) — presumably kept to stay
# textually parallel with the main setup.py; confirm before removing.
genai_requires = (
    "pydantic < 3",
    "docstring_parser < 1",
)

ray_testing_extra_require = ray_extra_require + [
    "pytest-xdist",
    # ray train extras required for prediction tests
    (
        "ray[train] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!="
        " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2"
    ),
    # Framework version constraints copied from testing_extra_require
    "scikit-learn",
    "tensorflow",
    "torch >= 2.0.0, < 2.1.0",
    "xgboost",
    "xgboost_ray",
]

reasoning_engine_extra_require = [
    "cloudpickle >= 2.2.1, < 3.0",
    "pydantic < 3",
]

rapid_evaluation_extra_require = [
    "nest_asyncio >= 1.0.0, < 1.6.0",
    "pandas >= 1.0.0, < 2.2.0",
]

langchain_extra_require = [
    "langchain >= 0.1.13, < 0.2",
    "langchain-core < 0.2",
    "langchain-google-vertexai < 0.2",
]

langchain_testing_extra_require = langchain_extra_require + [
    "pytest-xdist",
]

# "full" is the deduplicated union of most extras above (langchain and the
# testing-only groups are deliberately excluded). Note: list(set(...)) makes
# the ordering of this list non-deterministic across runs.
full_extra_require = list(
    set(
        tensorboard_extra_require
        + metadata_extra_require
        + xai_extra_require
        + lit_extra_require
        + featurestore_extra_require
        + pipelines_extra_require
        + datasets_extra_require
        + endpoint_extra_require
        + vizier_extra_require
        + prediction_extra_require
        + private_endpoints_extra_require
        + autologging_extra_require
        + preview_extra_require
        + ray_extra_require
        + reasoning_engine_extra_require
        + rapid_evaluation_extra_require
    )
)
testing_extra_require = (
    full_extra_require
    + profiler_extra_require
    + [
        "bigframes; python_version>='3.10'",
        # google-api-core 2.x is required since kfp requires protobuf > 4
        "google-api-core >= 2.11, < 3.0.0",
        "grpcio-testing",
        "ipython",
        "kfp >= 2.6.0, < 3.0.0",
        "pyfakefs",
        "pytest-asyncio",
        "pytest-xdist",
        "scikit-learn",
        # Lazy import requires > 2.12.0
        "tensorflow == 2.13.0; python_version<='3.11'",
        "tensorflow == 2.16.1; python_version>'3.11'",
        # TODO(jayceeli) torch 2.1.0 has conflict with pyfakefs, will check if
        # future versions fix this issue
        "torch >= 2.0.0, < 2.1.0; python_version<='3.11'",
        "torch >= 2.2.0; python_version>'3.11'",
        "requests-toolbelt < 1.0.0",
        "immutabledict",
        "xgboost",
    ]
)


setuptools.setup(
    name=name,
    version=version,
    description=description,
    long_description=readme,
    author="Google LLC",
    author_email="vertex-sdk-dev-pypi@google.com",
    license="Apache 2.0",
    url="https://github.com/googleapis/python-aiplatform",
    platforms="Posix; MacOS X; Windows",
    include_package_data=True,
    # The placeholder's only hard dependency: the real SDK, same version.
    install_requires=[f"google-cloud-aiplatform[all] == {version}"],
    extras_require={
        "endpoint": endpoint_extra_require,
        "full": full_extra_require,
        "metadata": metadata_extra_require,
        "tensorboard": tensorboard_extra_require,
        "testing": testing_extra_require,
        "xai": xai_extra_require,
        "lit": lit_extra_require,
        "cloud_profiler": profiler_extra_require,
        "pipelines": pipelines_extra_require,
        "vizier": vizier_extra_require,
        "prediction": prediction_extra_require,
        "datasets": datasets_extra_require,
        "private_endpoints": private_endpoints_extra_require,
        "autologging": autologging_extra_require,
        "preview": preview_extra_require,
        "ray": ray_extra_require,
        "ray_testing": ray_testing_extra_require,
        "reasoningengine": reasoning_engine_extra_require,
        "rapid_evaluation": rapid_evaluation_extra_require,
        "langchain": langchain_extra_require,
        "langchain_testing": langchain_testing_extra_require,
    },
    python_requires=">=3.8",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Topic :: Internet",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    zip_safe=False,
)
testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/version.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-

# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Version of the `vertexai` placeholder distribution. This file is read and
# exec'd by the sibling setup.py, which also pins the matching
# google-cloud-aiplatform version in install_requires. Do not bump by hand:
# this path is listed under "extra-files" in release-please-config.json, so
# release tooling rewrites it automatically.
__version__ = "1.75.0"
testbed/googleapis__python-aiplatform/release-please-config.json ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
3
+ "packages": {
4
+ ".": {
5
+ "release-type": "python",
6
+ "extra-files": [
7
+ "google/cloud/aiplatform/version.py",
8
+ "google/cloud/aiplatform/gapic_version.py",
9
+ "google/cloud/aiplatform_v1/gapic_version.py",
10
+ "google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py",
11
+ "google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py",
12
+ "google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py",
13
+ "google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py",
14
+ "google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py",
15
+ "google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py",
16
+ "google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py",
17
+ "google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py",
18
+ "google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py",
19
+ "google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py",
20
+ "google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py",
21
+ "google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py",
22
+ "google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py",
23
+ "google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py",
24
+ "google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py",
25
+ "google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py",
26
+ "google/cloud/aiplatform_v1beta1/gapic_version.py",
27
+ "pypi/_vertex_ai_placeholder/version.py",
28
+ {
29
+ "type": "json",
30
+ "path": "samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json",
31
+ "jsonpath": "$.clientLibrary.version"
32
+ },
33
+ {
34
+ "type": "json",
35
+ "path": "samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json",
36
+ "jsonpath": "$.clientLibrary.version"
37
+ }
38
+ ]
39
+ }
40
+ },
41
+ "release-type": "python",
42
+ "plugins": [
43
+ {
44
+ "type": "sentence-case"
45
+ }
46
+ ],
47
+ "initial-version": "0.1.0"
48
+ }
testbed/googleapis__python-aiplatform/renovate.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "extends": [
3
+ "config:base",
4
+ "group:all",
5
+ ":preserveSemverRanges",
6
+ ":disableDependencyDashboard"
7
+ ],
8
+ "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py"],
9
+ "pip_requirements": {
10
+ "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"]
11
+ }
12
+ }
testbed/googleapis__python-aiplatform/sdk_schema_tests/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #
testbed/googleapis__python-aiplatform/sdk_schema_tests/common_contract.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Argument names that `GenerativeModel.generate_content` is expected to expose
# in both the Vertex AI SDK and the Google AI SDK. The schema tests in
# method_signature_tests.py assert that each of these names is present in the
# method's signature for both SDKs (extra, SDK-specific arguments are allowed).
expected_generate_content_common_arg_keys = (
    "self",
    "contents",
    "generation_config",
    "safety_settings",
    "tools",
    "tool_config",
    "stream",
)
testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #
testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/method_signature_tests.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests for method_signature."""
2
+
3
+ from inspect import signature
4
+ import unittest
5
+
6
+ from vertexai.generative_models import GenerativeModel as VertexAIGenerativeModel
7
+ from google.generativeai import GenerativeModel as GoogleAIGenerativeModel
8
+ from sdk_schema_tests import common_contract
9
+
10
+
11
# Human-readable SDK labels, used only in assertion failure messages so a
# failing signature check identifies which SDK diverged.
_VERTEX_AI_SDK_NAME = "Vertex AI SDK"
_GOOGLE_AI_SDK_NAME = "Google AI SDK"
13
+
14
+
15
+ class TestGenerativeModelMethodSignatures(unittest.TestCase):
16
+ """Tests for method signatures of GenerativeModel."""
17
+
18
+ def _test_method_argument_key_in_both_sdks(
19
+ self,
20
+ method_under_test,
21
+ expected_method_arg_keys,
22
+ sdk_name
23
+ ):
24
+ method_signature = signature(method_under_test)
25
+ actual_method_arg_keys = method_signature.parameters.keys()
26
+ for expected_arg_key in expected_method_arg_keys:
27
+ self.assertIn(
28
+ member=expected_arg_key,
29
+ container=actual_method_arg_keys,
30
+ msg=(
31
+ f"[{sdk_name}][method {method_under_test.__name__}]: expected"
32
+ f" common arugment {expected_arg_key} not found in actual arugment"
33
+ f" list: {actual_method_arg_keys}"
34
+ ),
35
+ )
36
+
37
+ def test_generate_content_method_signature(self):
38
+ expected_common_arg_keys = (
39
+ common_contract.expected_generate_content_common_arg_keys
40
+ )
41
+ test_arguments = [
42
+ {
43
+ "method_under_test": VertexAIGenerativeModel.generate_content,
44
+ "expected_method_arg_keys": expected_common_arg_keys,
45
+ "sdk_name": _VERTEX_AI_SDK_NAME,
46
+ },
47
+ {
48
+ "method_under_test": GoogleAIGenerativeModel.generate_content,
49
+ "expected_method_arg_keys": expected_common_arg_keys,
50
+ "sdk_name": _GOOGLE_AI_SDK_NAME,
51
+ },
52
+ ]
53
+ for test_argument in test_arguments:
54
+ self._test_method_argument_key_in_both_sdks(**test_argument)
testbed/googleapis__python-aiplatform/setup.cfg ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright 2023 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # https://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Generated by synthtool. DO NOT EDIT!
18
+ [bdist_wheel]
19
+ universal = 1
testbed/googleapis__python-aiplatform/setup.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2022 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+
18
+ import io
19
+ import os
20
+
21
+ import setuptools # type: ignore
22
+
23
+ name = "google-cloud-aiplatform"
24
+ description = "Vertex AI API client library"
25
+
26
+ package_root = os.path.abspath(os.path.dirname(__file__))
27
+ readme_filename = os.path.join(package_root, "README.rst")
28
+ with io.open(readme_filename, encoding="utf-8") as readme_file:
29
+ readme = readme_file.read()
30
+
31
+ version = {}
32
+ with open(os.path.join(package_root, "google/cloud/aiplatform/version.py")) as fp:
33
+ exec(fp.read(), version)
34
+ version = version["__version__"]
35
+
36
+ packages = [
37
+ package
38
+ for package in setuptools.PEP420PackageFinder.find()
39
+ if package.startswith("google") or package.startswith("vertexai")
40
+ ]
41
+
42
+ # Add vertex_ray relative packages
43
+ packages += [
44
+ package.replace("google.cloud.aiplatform.vertex_ray", "vertex_ray")
45
+ for package in setuptools.PEP420PackageFinder.find()
46
+ if package.startswith("google.cloud.aiplatform.vertex_ray")
47
+ ]
48
+
49
+ profiler_extra_require = [
50
+ "tensorboard-plugin-profile >= 2.4.0, <2.18.0", # <3.0.0dev",
51
+ "werkzeug >= 2.0.0, <2.1.0dev",
52
+ "tensorflow >=2.4.0, <3.0.0dev",
53
+ ]
54
+ tensorboard_extra_require = [
55
+ "tensorflow >=2.3.0, <3.0.0dev; python_version<='3.11'"
56
+ ] + profiler_extra_require
57
+
58
# Optional dependency groups ("extras") for
# `pip install google-cloud-aiplatform[<extra>]`.  Each list below is wired
# into the `extras_require` mapping passed to setuptools.setup() further down.
# NOTE(review): `tensorboard_extra_require` and `profiler_extra_require`,
# referenced near the bottom, are defined earlier in this file (outside this
# view).
metadata_extra_require = ["pandas >= 1.0.0", "numpy>=1.15.0"]
xai_extra_require = ["tensorflow >=2.3.0, <3.0.0dev"]
lit_extra_require = [
    "tensorflow >= 2.3.0, <3.0.0dev",
    "pandas >= 1.0.0",
    "lit-nlp == 0.4.0",
    "explainable-ai-sdk >= 1.0.0",
]
featurestore_extra_require = [
    "google-cloud-bigquery-storage",
    "pandas >= 1.0.0",
    "pyarrow >= 6.0.1",
]
pipelines_extra_require = [
    "pyyaml>=5.3.1,<7",
]
# pyarrow floors per Python version (environment markers select exactly one).
datasets_extra_require = [
    "pyarrow >= 3.0.0, < 8.0dev; python_version<'3.11'",
    "pyarrow >= 10.0.1; python_version=='3.11'",
    "pyarrow >= 14.0.0; python_version>='3.12'",
]

vizier_extra_require = [
    "google-vizier>=0.1.6",
]

prediction_extra_require = [
    "docker >= 5.0.3",
    "fastapi >= 0.71.0, <=0.114.0",
    "httpx >=0.23.0, <0.25.0",  # Optional dependency of fastapi
    "starlette >= 0.17.1",
    "uvicorn[standard] >= 0.16.0",
]

endpoint_extra_require = ["requests >= 2.28.1"]

private_endpoints_extra_require = [
    "urllib3 >=1.21.1, <1.27",
    "requests >= 2.28.1",
]

autologging_extra_require = ["mlflow>=1.27.0,<=2.16.0"]

preview_extra_require = []

ray_extra_require = [
    # Cluster only supports 2.9.3 and 2.33.0. Keep 2.4.0 for our testing environment.
    # Note that testing is submitting a job in a cluster with Ray 2.9.3 remotely.
    (
        "ray[default] >= 2.4, <= 2.33.0,!= 2.5.*,!= 2.6.*,!= 2.7.*,!="
        " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2, !=2.10.*, !=2.11.*, !=2.12.*, !=2.13.*, !="
        " 2.14.*, !=2.15.*, !=2.16.*, !=2.17.*, !=2.18.*, !=2.19.*, !=2.20.*, !="
        " 2.21.*, !=2.22.*, !=2.23.*, !=2.24.*, !=2.25.*, !=2.26.*, !=2.27.*, !="
        " 2.28.*, !=2.29.*, !=2.30.*, !=2.31.*, !=2.32.*; python_version<'3.11'"
    ),
    # To avoid ImportError: cannot import name 'packaging' from 'pkg_resources'
    "setuptools < 70.0.0",
    # Ray Data v2.4 in Python 3.11 is broken, but got fixed in Ray v2.5.
    "ray[default] >= 2.5, <= 2.33.0; python_version=='3.11'",
    "google-cloud-bigquery-storage",
    "google-cloud-bigquery",
    "pandas >= 1.0.0",
    "pyarrow >= 6.0.1",
    "immutabledict",
]

# Core GenAI requirements — appended to install_requires, not an extra.
genai_requires = (
    "pydantic < 3",
    "typing_extensions",
    "docstring_parser < 1",
)

ray_testing_extra_require = ray_extra_require + [
    "pytest-xdist",
    # ray train extras required for prediction tests
    "ray[train]",
    # Framework version constraints copied from testing_extra_require
    "scikit-learn<1.6.0",
    "tensorflow",
    "torch >= 2.0.0, < 2.1.0",
    "xgboost",
    "xgboost_ray",
]

reasoning_engine_extra_require = [
    "cloudpickle >= 3.0, < 4.0",
    "google-cloud-trace < 2",
    "opentelemetry-sdk < 2",
    "opentelemetry-exporter-gcp-trace < 2",
    "pydantic >= 2.6.3, < 3",
    "typing_extensions",
]

evaluation_extra_require = [
    "pandas >= 1.0.0",
    "tqdm>=4.23.0",
]

langchain_extra_require = [
    "langchain >= 0.1.16, < 0.4",
    "langchain-core < 0.4",
    "langchain-google-vertexai < 3",
    "openinference-instrumentation-langchain >= 0.1.19, < 0.2",
]

# set() de-duplicates the union of requirement strings; the resulting list
# order is unspecified (NOTE(review): this makes generated metadata ordering
# non-reproducible across builds — harmless for pip, but worth confirming).
langchain_testing_extra_require = list(
    set(
        langchain_extra_require
        + reasoning_engine_extra_require
        + ["absl-py", "pytest-xdist"]
    )
)

tokenization_extra_require = ["sentencepiece >= 0.2.0"]
tokenization_testing_extra_require = tokenization_extra_require + ["nltk"]

# "full" = union of most user-facing extras (testing-only groups excluded).
full_extra_require = list(
    set(
        tensorboard_extra_require
        + metadata_extra_require
        + xai_extra_require
        + lit_extra_require
        + featurestore_extra_require
        + pipelines_extra_require
        + datasets_extra_require
        + endpoint_extra_require
        + vizier_extra_require
        + prediction_extra_require
        + private_endpoints_extra_require
        + autologging_extra_require
        + preview_extra_require
        + ray_extra_require
        + evaluation_extra_require
    )
)
# "testing" = everything in "full" plus profiling, tokenization, and the
# pinned frameworks/tools used by the unit/system test suites.
testing_extra_require = (
    full_extra_require
    + profiler_extra_require
    + tokenization_testing_extra_require
    + [
        # aiohttp is required for async rest tests (need google-auth[aiohttp],
        # but can't specify extras in constraints files)
        "aiohttp",
        "bigframes; python_version>='3.10'",
        # google-api-core 2.x is required since kfp requires protobuf > 4
        "google-api-core >= 2.11, < 3.0.0",
        "grpcio-testing",
        "ipython",
        "kfp >= 2.6.0, < 3.0.0",
        "pytest-asyncio",
        "pytest-xdist",
        "scikit-learn<1.6.0; python_version<='3.10'",
        "scikit-learn; python_version>'3.10'",
        # Lazy import requires > 2.12.0
        "tensorflow == 2.13.0; python_version<='3.11'",
        "tensorflow == 2.16.1; python_version>'3.11'",
        # TODO(jayceeli) torch 2.1.0 has conflict with pyfakefs, will check if
        # future versions fix this issue
        "torch >= 2.0.0, < 2.1.0; python_version<='3.11'",
        "torch >= 2.2.0; python_version>'3.11'",
        "requests-toolbelt < 1.0.0",
        "immutabledict",
        "xgboost",
    ]
)
223
+
224
+
225
# Package definition.  `name`, `version`, `description`, `readme`, and
# `packages` are computed earlier in this file (outside this view).
setuptools.setup(
    name=name,
    version=version,
    description=description,
    long_description=readme,
    packages=packages,
    # vertex_ray is shipped as a top-level import alias for the Ray client.
    package_dir={"vertex_ray": "google/cloud/aiplatform/vertex_ray"},
    # Jinja2 templates (e.g. rich HTML widgets) shipped alongside the code.
    package_data={"": ["*.html.j2"]},
    entry_points={
        "console_scripts": [
            "tb-gcp-uploader=google.cloud.aiplatform.tensorboard.uploader_main:run_main"
        ],
    },
    # Legacy pkg_resources-style namespace packages shared with other
    # google-cloud-* distributions.
    namespace_packages=("google", "google.cloud"),
    author="Google LLC",
    author_email="googleapis-packages@google.com",
    license="Apache 2.0",
    url="https://github.com/googleapis/python-aiplatform",
    platforms="Posix; MacOS X; Windows",
    include_package_data=True,
    # Hard requirements; the genai tuple is appended so the two groups can be
    # maintained separately above.
    install_requires=(
        (
            "google-api-core[grpc] >= 1.34.1,"
            " <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*"
        ),
        "google-auth >= 2.14.1, <3.0.0dev",
        "proto-plus >= 1.22.3, <2.0.0dev",
        "protobuf>=3.20.2,<6.0.0dev,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
        "packaging >= 14.3",
        "google-cloud-storage >= 1.32.0, < 3.0.0dev",
        "google-cloud-bigquery >= 1.15.0, < 4.0.0dev, !=3.20.0",
        "google-cloud-resource-manager >= 1.3.3, < 3.0.0dev",
        "shapely < 3.0.0dev",
    )
    + genai_requires,
    # Each key is installable as google-cloud-aiplatform[<key>]; the lists are
    # defined above setup().
    extras_require={
        "endpoint": endpoint_extra_require,
        "full": full_extra_require,
        "metadata": metadata_extra_require,
        "tensorboard": tensorboard_extra_require,
        "testing": testing_extra_require,
        "xai": xai_extra_require,
        "lit": lit_extra_require,
        "cloud_profiler": profiler_extra_require,
        "pipelines": pipelines_extra_require,
        "vizier": vizier_extra_require,
        "prediction": prediction_extra_require,
        "datasets": datasets_extra_require,
        "private_endpoints": private_endpoints_extra_require,
        "autologging": autologging_extra_require,
        "preview": preview_extra_require,
        "ray": ray_extra_require,
        "ray_testing": ray_testing_extra_require,
        "reasoningengine": reasoning_engine_extra_require,
        "evaluation": evaluation_extra_require,
        "langchain": langchain_extra_require,
        "langchain_testing": langchain_testing_extra_require,
        "tokenization": tokenization_extra_require,
    },
    python_requires=">=3.8",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Topic :: Internet",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    zip_safe=False,
)
testbed/googleapis__python-aiplatform/testing/constraints-langchain.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ langchain
2
+ langchain-core
3
+ langchain-google-vertexai
testbed/googleapis__python-aiplatform/testing/constraints-ray-2.33.0.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ray==2.33.0
2
+ # Below constraints are inherited from constraints-3.10.txt
3
+ google-api-core
4
+ proto-plus==1.22.3
5
+ protobuf
6
+ mock==4.0.2
7
+ google-cloud-storage==2.2.1 # Increased for kfp 2.0 compatibility
8
+ packaging==24.1 # Increased to unbreak canonicalize_version error (b/377774673)
9
+ grpcio-testing==1.34.0
10
+ mlflow==1.30.1 # Pinned to speed up installation
11
+ pytest-xdist==3.3.1 # Pinned to unbreak unit tests
12
+ IPython # Added to test supernova rich html buttons
13
+
testbed/googleapis__python-aiplatform/testing/constraints-ray-2.4.0.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ray==2.4.0
2
+ # Below constraints are inherited from constraints-3.10.txt
3
+ google-api-core
4
+ proto-plus==1.22.3
5
+ protobuf
6
+ mock==4.0.2
7
+ google-cloud-storage==2.2.1 # Increased for kfp 2.0 compatibility
8
+ packaging==20.0 # Increased for compatibility with MLFlow
9
+ grpcio-testing==1.34.0
10
+ mlflow==1.30.1 # Pinned to speed up installation
11
+ pytest-xdist==3.3.1 # Pinned to unbreak unit tests
12
+ IPython # Added to test supernova rich html buttons
13
+
testbed/googleapis__python-aiplatform/tests/system/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2022 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
testbed/googleapis__python-aiplatform/tests/system/aiplatform/e2e_base.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2021 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+
18
+ import abc
19
+ import asyncio
20
+ import importlib
21
+ import logging
22
+ import os
23
+ import pytest
24
+ import uuid
25
+
26
+ from typing import Any, Dict, Generator
27
+
28
+ from google.api_core import exceptions
29
+ from google.cloud import aiplatform
30
+ import vertexai
31
+ from google.cloud import bigquery
32
+ from google.cloud import resourcemanager
33
+ from google.cloud import storage
34
+ from google.cloud.aiplatform import initializer
35
+
36
# Project/region configuration shared by all system tests in this package.
_PROJECT = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")  # None outside CI
_VPC_NETWORK_URI = os.getenv("_VPC_NETWORK_URI")
_LOCATION = "us-central1"
39
+
40
+
41
class TestEndToEnd(metaclass=abc.ABCMeta):
    """Abstract base class for Vertex AI end-to-end system tests.

    Provides class-scoped pytest fixtures that provision and tear down GCS
    staging buckets, BigQuery datasets, and any Vertex resources that test
    methods append to ``shared_state["resources"]``.
    """

    # NOTE(review): this @property/@classmethod/@abstractmethod stack serves
    # only as an abstract declaration; concrete subclasses satisfy it with a
    # plain class attribute (e.g. `_temp_prefix = "temp-..."`) — confirm
    # before restructuring.
    @property
    @classmethod
    @abc.abstractmethod
    def _temp_prefix(cls) -> str:
        """Prefix to staging bucket and display names created by this end-to-end test.
        Keep the string as short as possible and use kebab case, starting with a lowercase letter.

        Example: `"temp-vertex-hpt-test"`
        """
        pass

    @classmethod
    def _make_display_name(cls, key: str) -> str:
        """Helper method to make unique display_names.

        Args:
            key (str): Required. Identifier for the display name.
        Returns:
            Unique display name.
        """
        return f"{cls._temp_prefix}-{key}-{uuid.uuid4()}"

    def setup_method(self):
        # Reload SDK modules before each test so global state (e.g. settings
        # from a previous aiplatform.init call) does not leak between tests.
        importlib.reload(initializer)
        importlib.reload(aiplatform)
        importlib.reload(vertexai)

    @pytest.fixture(scope="class")
    def shared_state(self) -> Generator[Dict[str, Any], None, None]:
        # Mutable dict shared across all tests of a class; carries resource
        # handles from test methods to the teardown fixtures below.
        shared_state = {}
        yield shared_state

    @pytest.fixture(scope="class")
    def prepare_staging_bucket(
        self, shared_state: Dict[str, Any]
    ) -> Generator[storage.bucket.Bucket, None, None]:
        """Create a staging bucket and store bucket resource object in shared state."""

        # GCS bucket names are limited to 63 characters.
        staging_bucket_name = f"{self._temp_prefix.lower()}-{uuid.uuid4()}"[:63]
        shared_state["staging_bucket_name"] = staging_bucket_name

        storage_client = storage.Client(project=_PROJECT)
        shared_state["storage_client"] = storage_client

        bucket = storage_client.create_bucket(
            staging_bucket_name, project=_PROJECT, location=_LOCATION
        )

        # TODO(#1415) Once PR Is merged, use the added utilities to
        # provide create/view access to Pipeline's default service account (compute)
        project_number = (
            resourcemanager.ProjectsClient()
            .get_project(name=f"projects/{_PROJECT}")
            .name.split("/", 1)[1]
        )

        service_account = f"{project_number}-compute@developer.gserviceaccount.com"
        bucket_iam_policy = bucket.get_iam_policy()
        bucket_iam_policy.setdefault("roles/storage.objectCreator", set()).add(
            f"serviceAccount:{service_account}"
        )
        bucket_iam_policy.setdefault("roles/storage.objectViewer", set()).add(
            f"serviceAccount:{service_account}"
        )
        bucket.set_iam_policy(bucket_iam_policy)

        shared_state["bucket"] = bucket
        yield

    @pytest.fixture(scope="class")
    def delete_staging_bucket(self, shared_state: Dict[str, Any]):
        """Delete the staging bucket and all it's contents"""

        yield

        # Get the staging bucket used for testing and wipe it
        bucket = shared_state["bucket"]
        bucket.delete(force=True)

    @pytest.fixture(scope="class")
    def prepare_bigquery_dataset(
        self, shared_state: Dict[str, Any]
    ) -> Generator[bigquery.dataset.Dataset, None, None]:
        """Create a bigquery dataset and store bigquery resource object in shared state."""

        bigquery_client = bigquery.Client(project=_PROJECT)
        shared_state["bigquery_client"] = bigquery_client

        # BigQuery dataset ids may not contain dashes.
        dataset_name = f"{self._temp_prefix.lower()}_{uuid.uuid4()}".replace("-", "_")
        dataset_id = f"{_PROJECT}.{dataset_name}"
        shared_state["bigquery_dataset_id"] = dataset_id

        dataset = bigquery.Dataset(dataset_id)
        dataset.location = _LOCATION
        shared_state["bigquery_dataset"] = bigquery_client.create_dataset(dataset)

        yield

    @pytest.fixture(scope="class")
    def delete_bigquery_dataset(self, shared_state: Dict[str, Any]):
        """Delete the bigquery dataset"""

        yield

        # Get the bigquery dataset id used for testing and wipe it
        bigquery_dataset = shared_state["bigquery_dataset"]
        bigquery_client = shared_state["bigquery_client"]
        bigquery_client.delete_dataset(
            bigquery_dataset.dataset_id, delete_contents=True, not_found_ok=True
        )  # Make an API request.

    @pytest.fixture(scope="class")
    def bigquery_dataset(self) -> Generator[bigquery.dataset.Dataset, None, None]:
        """Create a bigquery dataset and store bigquery resource object in shared state."""

        # Self-contained variant of prepare/delete_bigquery_dataset: yields the
        # dataset directly and cleans it up itself.
        bigquery_client = bigquery.Client(project=_PROJECT)

        dataset_name = f"{self._temp_prefix.lower()}_{uuid.uuid4()}".replace("-", "_")
        dataset_id = f"{_PROJECT}.{dataset_name}"

        dataset = bigquery.Dataset(dataset_id)
        dataset.location = _LOCATION
        dataset = bigquery_client.create_dataset(dataset)

        yield dataset

        bigquery_client.delete_dataset(
            dataset.dataset_id, delete_contents=True, not_found_ok=True
        )  # Make an API request.

    @pytest.fixture(scope="class")
    def tear_down_resources(self, shared_state: Dict[str, Any]):
        """Delete every Vertex AI resource created during test"""

        yield

        if "resources" not in shared_state:
            return

        # TODO(b/218310362): Add resource deletion system tests
        # Bring all Endpoints to the front of the list
        # Ensures Models are undeployed first before we attempt deletion
        shared_state["resources"].sort(
            key=lambda r: 1
            if isinstance(r, aiplatform.Endpoint)
            or isinstance(r, aiplatform.MatchingEngineIndexEndpoint)
            or isinstance(r, aiplatform.Experiment)
            else 2
        )

        for resource in shared_state["resources"]:
            try:
                if isinstance(
                    resource,
                    (
                        aiplatform.Endpoint,
                        aiplatform.Featurestore,
                        aiplatform.MatchingEngineIndexEndpoint,
                    ),
                ):
                    # For endpoint, undeploy model then delete endpoint
                    # For featurestore, force delete its entity_types and features with the featurestore
                    resource.delete(force=True)
                elif isinstance(resource, aiplatform.Experiment):
                    resource.delete(delete_backing_tensorboard_runs=True)
                else:
                    resource.delete()
            except (exceptions.GoogleAPIError, RuntimeError) as e:
                # Best-effort cleanup: log and continue with remaining resources.
                logging.exception(f"Could not delete resource: {resource} due to: {e}")

    @pytest.fixture(scope="session")
    def event_loop(event_loop):
        # NOTE(review): the first parameter is named `event_loop`, not `self`,
        # so pytest binds the test-class instance to it positionally; also
        # asyncio.get_event_loop() is deprecated outside a running loop.
        # Confirm this session-scoped override still behaves as intended.
        loop = asyncio.get_event_loop()
        yield loop
        loop.close()
testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_custom_job.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2023 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+
18
+ import os
19
+
20
+ import pytest
21
+ from unittest import mock
22
+
23
+ from google.cloud import aiplatform
24
+ from google.cloud.aiplatform.constants import base as constants
25
+ from google.cloud.aiplatform.utils import resource_manager_utils
26
+ from google.cloud.aiplatform.compat.types import job_state as gca_job_state
27
+ from tests.system.aiplatform import e2e_base
28
+
29
# Training container images exercised by the custom-job tests: a Vertex
# prebuilt sklearn image and a vanilla Python image.
_PREBUILT_CONTAINER_IMAGE = (
    "us-docker.pkg.dev/vertex-ai/training/sklearn-cpu.1-0:latest"
)
_CUSTOM_CONTAINER_IMAGE = "python:3.8"

# Local training script packaged and uploaded by CustomJob.from_local_script().
_DIR_NAME = os.path.dirname(os.path.abspath(__file__))
_LOCAL_TRAINING_SCRIPT_PATH = os.path.join(
    _DIR_NAME, "test_resources/custom_job_script.py"
)
38
+
39
+
40
# On Kokoro CI, patch the SDK dependency strings that get installed inside the
# training containers so they point at the exact commit under test; outside CI
# the constants are left unchanged.  (The f-string is only evaluated when
# KOKORO_GIT_COMMIT is set, since it sits in the true branch of the
# conditional expression.)
@mock.patch.object(
    constants,
    "AIPLATFORM_DEPENDENCY_PATH",
    "google-cloud-aiplatform @ git+https://github.com/googleapis/"
    f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}#egg=google-cloud-aiplatform"
    if os.environ.get("KOKORO_GIT_COMMIT")
    else constants.AIPLATFORM_DEPENDENCY_PATH,
)
@mock.patch.object(
    constants,
    "AIPLATFORM_AUTOLOG_DEPENDENCY_PATH",
    "google-cloud-aiplatform[autologging] @ git+https://github.com/googleapis/"
    f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}#egg=google-cloud-aiplatform"
    if os.environ.get("KOKORO_GIT_COMMIT")
    else constants.AIPLATFORM_AUTOLOG_DEPENDENCY_PATH,
)
@pytest.mark.usefixtures(
    "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources"
)
class TestCustomJob(e2e_base.TestEndToEnd):
    """System tests for CustomJob.from_local_script with prebuilt/custom
    containers, with and without experiment autologging."""

    _temp_prefix = "temp-vertex-sdk-custom-job"

    # NOTE(review): pytest invokes setup_class with the class object even
    # without an explicit @classmethod decorator.
    def setup_class(cls):
        cls._experiment_name = cls._make_display_name("experiment")[:60]
        cls._experiment_run_name = cls._make_display_name("experiment-run")[:60]

        project_number = resource_manager_utils.get_project_number(e2e_base._PROJECT)
        cls._service_account = f"{project_number}-compute@developer.gserviceaccount.com"

    def test_from_local_script_prebuilt_container(self, shared_state):
        # First test in the class: initializes the shared resource list used
        # by tear_down_resources (later tests assume it exists).
        shared_state["resources"] = []

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
        )

        display_name = self._make_display_name("custom-job")

        custom_job = aiplatform.CustomJob.from_local_script(
            display_name=display_name,
            script_path=_LOCAL_TRAINING_SCRIPT_PATH,
            container_uri=_PREBUILT_CONTAINER_IMAGE,
            requirements=["scikit-learn", "pandas"],
        )
        try:
            custom_job.run()
        finally:
            # Record the job for cleanup even if run() fails.
            shared_state["resources"].append(custom_job)

        assert custom_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED

    def test_from_local_script_custom_container(self, shared_state):

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
        )

        display_name = self._make_display_name("custom-job")

        custom_job = aiplatform.CustomJob.from_local_script(
            display_name=display_name,
            script_path=_LOCAL_TRAINING_SCRIPT_PATH,
            container_uri=_CUSTOM_CONTAINER_IMAGE,
            requirements=["scikit-learn", "pandas"],
        )
        try:
            custom_job.run()
        finally:
            shared_state["resources"].append(custom_job)

        assert custom_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED

    def test_from_local_script_enable_autolog_prebuilt_container(self, shared_state):

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
            experiment=self._experiment_name,
        )

        # Register the experiment itself for teardown.
        shared_state["resources"].append(
            aiplatform.metadata.metadata._experiment_tracker.experiment
        )

        display_name = self._make_display_name("custom-job")

        custom_job = aiplatform.CustomJob.from_local_script(
            display_name=display_name,
            script_path=_LOCAL_TRAINING_SCRIPT_PATH,
            container_uri=_PREBUILT_CONTAINER_IMAGE,
            requirements=["scikit-learn", "pandas"],
            enable_autolog=True,
        )

        try:
            # Run against an explicitly created experiment run.
            with aiplatform.start_run(self._experiment_run_name) as run:
                shared_state["resources"].append(run)
                custom_job.run(
                    experiment=self._experiment_name,
                    experiment_run=run,
                    service_account=self._service_account,
                )
        finally:
            shared_state["resources"].append(custom_job)

        assert custom_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED

    def test_from_local_script_enable_autolog_custom_container(self, shared_state):

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
        )

        display_name = self._make_display_name("custom-job")

        custom_job = aiplatform.CustomJob.from_local_script(
            display_name=display_name,
            script_path=_LOCAL_TRAINING_SCRIPT_PATH,
            container_uri=_CUSTOM_CONTAINER_IMAGE,
            requirements=["scikit-learn", "pandas"],
            enable_autolog=True,
        )

        # Let the job auto-create the experiment run.
        try:
            custom_job.run(
                experiment=self._experiment_name,
                service_account=self._service_account,
            )
        finally:
            shared_state["resources"].append(custom_job)
            # Also register the auto-created experiment run for teardown.
            experiment_run_resource = aiplatform.Context.get(
                custom_job.job_spec.experiment_run
            )
            if experiment_run_resource:
                shared_state["resources"].append(experiment_run_resource)

        assert custom_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_dataset.py ADDED
@@ -0,0 +1,436 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2022 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+
18
+ import os
19
+ import uuid
20
+ import pytest
21
+ import importlib
22
+
23
+ import pandas as pd
24
+ import re
25
+
26
+ from datetime import datetime
27
+
28
+ from google.api_core import exceptions
29
+ from google.api_core import client_options
30
+
31
+ from google.cloud import aiplatform
32
+ from google.cloud import bigquery
33
+ from google.cloud import storage
34
+ from google.cloud.aiplatform import utils
35
+ from google.cloud.aiplatform import initializer
36
+ from google.cloud.aiplatform.compat.services import (
37
+ dataset_service_client_v1 as dataset_service,
38
+ )
39
+
40
+ from test_utils.vpcsc_config import vpcsc_config
41
+
42
+ from tests.system.aiplatform import e2e_base
43
+
44
# Project/location and fixed resource identifiers used by the dataset tests.
_TEST_PROJECT = e2e_base._PROJECT
_TEST_LOCATION = e2e_base._LOCATION
TEST_BUCKET = os.environ.get(
    "GCLOUD_TEST_SAMPLES_BUCKET", "cloud-samples-data-us-central1"
)

_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
_TEST_API_ENDPOINT = f"{_TEST_LOCATION}-aiplatform.googleapis.com"
_TEST_IMAGE_DATASET_ID = "1997950066622464000"  # permanent_50_flowers_dataset
_TEST_TEXT_DATASET_ID = (
    "6203215905493614592"  # permanent_text_entity_extraction_dataset
)
_TEST_DATASET_DISPLAY_NAME = "permanent_50_flowers_dataset"
_TEST_DATASET_LABELS = {"test": "labels"}
_TEST_DATASET_DESCRIPTION = "test description"
# GCS/BigQuery source assets for the various dataset types.
_TEST_TABULAR_CLASSIFICATION_GCS_SOURCE = "gs://ucaip-sample-resources/iris_1000.csv"
_TEST_FORECASTING_BQ_SOURCE = (
    "bq://ucaip-sample-tests:ucaip_test_us_central1.2020_sales_train"
)
_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE = "gs://ucaip-samples-us-central1/sdk_system_test_resources/text_entity_extraction_dataset_small.jsonl"
_TEST_IMAGE_OBJECT_DETECTION_GCS_SOURCE = "gs://cloud-samples-data-us-central1/ai-platform-unified/datasets/images/isg_data.jsonl"
_TEST_TEXT_ENTITY_IMPORT_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_extraction_io_format_1.0.0.yaml"
_TEST_IMAGE_OBJ_DET_SEGMENTATION_IMPORT_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_segmentation_io_format_1.0.0.yaml"

# create_from_dataframe
# Column names covering one of each supported BigQuery scalar/array type.
_TEST_BOOL_COL = "bool_col"
_TEST_BOOL_ARR_COL = "bool_array_col"
_TEST_DOUBLE_COL = "double_col"
_TEST_DOUBLE_ARR_COL = "double_array_col"
_TEST_INT_COL = "int64_col"
_TEST_INT_ARR_COL = "int64_array_col"
_TEST_STR_COL = "string_col"
_TEST_STR_ARR_COL = "string_array_col"
_TEST_BYTES_COL = "bytes_col"
_TEST_TIMESTAMP_COL = "timestamp_col"
_TEST_DATETIME_COL = "datetime_col"
_TEST_DF_COLUMN_NAMES = [
    _TEST_BOOL_COL,
    _TEST_BOOL_ARR_COL,
    _TEST_DOUBLE_COL,
    _TEST_DOUBLE_ARR_COL,
    _TEST_INT_COL,
    _TEST_INT_ARR_COL,
    _TEST_STR_COL,
    _TEST_STR_ARR_COL,
    _TEST_BYTES_COL,
    _TEST_TIMESTAMP_COL,
    _TEST_DATETIME_COL,
]

# Captured once at import time so both timestamp variants share one instant.
_TEST_TIME_NOW = datetime.now()
_TEST_TIMESTAMP_WITH_TIMEZONE = pd.Timestamp(_TEST_TIME_NOW, tz="US/Pacific")
_TEST_TIMESTAMP_WITHOUT_TIMEZONE = pd.Timestamp(_TEST_TIME_NOW)

# Two-row dataframe exercising every column type above.
_TEST_DATAFRAME = pd.DataFrame(
    data=[
        [
            False,
            [True, False],
            1.2,
            [1.2, 3.4],
            1,
            [1, 2],
            "test",
            ["test1", "test2"],
            b"1",
            _TEST_TIMESTAMP_WITH_TIMEZONE,
            _TEST_TIMESTAMP_WITHOUT_TIMEZONE,
        ],
        [
            True,
            [True, True],
            2.2,
            [2.2, 4.4],
            2,
            [2, 3],
            "test1",
            ["test2", "test3"],
            b"0",
            _TEST_TIMESTAMP_WITH_TIMEZONE,
            _TEST_TIMESTAMP_WITHOUT_TIMEZONE,
        ],
    ],
    columns=_TEST_DF_COLUMN_NAMES,
)
# Expected BigQuery schema for _TEST_DATAFRAME (note: bytes are mapped to
# STRING here, matching the SDK's upload behavior under test).
_TEST_DATAFRAME_BQ_SCHEMA = [
    bigquery.SchemaField(name="bool_col", field_type="BOOL"),
    bigquery.SchemaField(name="bool_array_col", field_type="BOOL", mode="REPEATED"),
    bigquery.SchemaField(name="double_col", field_type="FLOAT"),
    bigquery.SchemaField(name="double_array_col", field_type="FLOAT", mode="REPEATED"),
    bigquery.SchemaField(name="int64_col", field_type="INTEGER"),
    bigquery.SchemaField(name="int64_array_col", field_type="INTEGER", mode="REPEATED"),
    bigquery.SchemaField(name="string_col", field_type="STRING"),
    bigquery.SchemaField(name="string_array_col", field_type="STRING", mode="REPEATED"),
    bigquery.SchemaField(name="bytes_col", field_type="STRING"),
    bigquery.SchemaField(name="timestamp_col", field_type="TIMESTAMP"),
    bigquery.SchemaField(name="datetime_col", field_type="DATETIME"),
]
+
143
+
144
+ class TestDataset(e2e_base.TestEndToEnd):
145
+
146
+ _temp_prefix = "temp-vertex-sdk-dataset-test"
147
+
148
    def setup_method(self):
        # Reload SDK modules so per-test init state does not leak, then point
        # the SDK at the shared test project/region.
        importlib.reload(initializer)
        importlib.reload(aiplatform)

        aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
153
+
154
    @pytest.fixture()
    def storage_client(self):
        # Function-scoped GCS client for the test project.
        yield storage.Client(project=_TEST_PROJECT)
157
+
158
    @pytest.fixture()
    def staging_bucket(self, storage_client):
        # Create a uniquely named throwaway bucket, hand it to the test, and
        # force-delete it (including contents) afterwards.
        new_staging_bucket = f"temp-sdk-integration-{uuid.uuid4()}"
        bucket = storage_client.create_bucket(new_staging_bucket)

        yield bucket

        bucket.delete(force=True)
166
+
167
    @pytest.fixture()
    def dataset_gapic_client(self):
        # Low-level GAPIC DatasetService client pinned to the regional
        # endpoint; used to verify data items independently of the SDK layer.
        gapic_client = dataset_service.DatasetServiceClient(
            client_options=client_options.ClientOptions(api_endpoint=_TEST_API_ENDPOINT)
        )

        yield gapic_client
174
+
175
    # TODO(vinnys): Remove pytest skip once persistent resources are accessible
    @pytest.mark.skip(reason="System tests cannot access persistent test resources")
    def test_get_existing_dataset(self):
        """Retrieve a known existing dataset, ensure SDK successfully gets the
        dataset resource."""

        flowers_dataset = aiplatform.ImageDataset(dataset_name=_TEST_IMAGE_DATASET_ID)
        assert flowers_dataset.name == _TEST_IMAGE_DATASET_ID
        assert flowers_dataset.display_name == _TEST_DATASET_DISPLAY_NAME
184
+
185
    def test_get_nonexistent_dataset(self):
        """Ensure attempting to retrieve a dataset that doesn't exist raises
        a Google API core 404 exception."""

        # AI Platform service returns 404
        with pytest.raises(exceptions.NotFound):
            aiplatform.ImageDataset(dataset_name="0")
192
+
193
+ def test_get_new_dataset_and_import(self, dataset_gapic_client):
194
+ """Retrieve new, empty dataset and import a text dataset using import().
195
+ Then verify data items were successfully imported."""
196
+
197
+ try:
198
+ text_dataset = aiplatform.TextDataset.create(
199
+ display_name=self._make_display_name(key="get_new_dataset_and_import"),
200
+ )
201
+
202
+ my_dataset = aiplatform.TextDataset(dataset_name=text_dataset.name)
203
+
204
+ data_items_pre_import = dataset_gapic_client.list_data_items(
205
+ parent=my_dataset.resource_name
206
+ )
207
+
208
+ assert len(list(data_items_pre_import)) == 0
209
+
210
+ # Blocking call to import
211
+ my_dataset.import_data(
212
+ gcs_source=_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE,
213
+ import_schema_uri=_TEST_TEXT_ENTITY_IMPORT_SCHEMA,
214
+ import_request_timeout=500,
215
+ )
216
+
217
+ data_items_post_import = dataset_gapic_client.list_data_items(
218
+ parent=my_dataset.resource_name
219
+ )
220
+
221
+ assert len(list(data_items_post_import)) == 51
222
+ finally:
223
+ text_dataset.delete()
224
+
225
+ @vpcsc_config.skip_if_inside_vpcsc
226
+ def test_create_and_import_image_dataset(self, dataset_gapic_client):
227
+ """Use the Dataset.create() method to create a new image obj detection
228
+ dataset and import images. Then confirm images were successfully imported."""
229
+
230
+ try:
231
+ img_dataset = aiplatform.ImageDataset.create(
232
+ display_name=self._make_display_name(key="create_image_dataset"),
233
+ gcs_source=_TEST_IMAGE_OBJECT_DETECTION_GCS_SOURCE,
234
+ import_schema_uri=_TEST_IMAGE_OBJ_DET_SEGMENTATION_IMPORT_SCHEMA,
235
+ create_request_timeout=None,
236
+ )
237
+
238
+ finally:
239
+ if img_dataset is not None:
240
+ img_dataset.delete()
241
+
242
+ def test_create_tabular_dataset(self):
243
+ """Use the Dataset.create() method to create a new tabular dataset.
244
+ Then confirm the dataset was successfully created and references GCS source."""
245
+
246
+ try:
247
+ tabular_dataset = aiplatform.TabularDataset.create(
248
+ display_name=self._make_display_name(key="create_tabular_dataset"),
249
+ gcs_source=[_TEST_TABULAR_CLASSIFICATION_GCS_SOURCE],
250
+ create_request_timeout=None,
251
+ )
252
+
253
+ gapic_metadata = tabular_dataset.to_dict()["metadata"]
254
+ gcs_source_uris = gapic_metadata["inputConfig"]["gcsSource"]["uri"]
255
+
256
+ assert len(gcs_source_uris) == 1
257
+ assert _TEST_TABULAR_CLASSIFICATION_GCS_SOURCE == gcs_source_uris[0]
258
+ assert (
259
+ tabular_dataset.metadata_schema_uri
260
+ == aiplatform.schema.dataset.metadata.tabular
261
+ )
262
+
263
+ finally:
264
+ if tabular_dataset is not None:
265
+ tabular_dataset.delete()
266
+
267
+ def test_create_tabular_dataset_from_dataframe(self, bigquery_dataset):
268
+ table_id = f"test_table{uuid.uuid4()}"
269
+ bq_staging_table = (
270
+ f"bq://{_TEST_PROJECT}.{bigquery_dataset.dataset_id}.{table_id}"
271
+ )
272
+ try:
273
+ tabular_dataset = aiplatform.TabularDataset.create_from_dataframe(
274
+ df_source=_TEST_DATAFRAME,
275
+ staging_path=bq_staging_table,
276
+ display_name=self._make_display_name(
277
+ key="create_and_import_dataset_from_dataframe"
278
+ ),
279
+ )
280
+
281
+ """Use the Dataset.create_from_dataframe() method to create a new tabular dataset.
282
+ Then confirm the dataset was successfully created and references the BQ source."""
283
+ gapic_metadata = tabular_dataset.to_dict()["metadata"]
284
+ bq_source = gapic_metadata["inputConfig"]["bigquerySource"]["uri"]
285
+
286
+ assert bq_staging_table == bq_source
287
+ assert (
288
+ tabular_dataset.metadata_schema_uri
289
+ == aiplatform.schema.dataset.metadata.tabular
290
+ )
291
+ bigquery_client = bigquery.Client(
292
+ project=_TEST_PROJECT,
293
+ credentials=initializer.global_config.credentials,
294
+ )
295
+ table = bigquery_client.get_table(
296
+ f"{_TEST_PROJECT}.{bigquery_dataset.dataset_id}.{table_id}"
297
+ )
298
+ assert (
299
+ table.schema[-1]
300
+ == bigquery.SchemaField(name="datetime_col", field_type="DATETIME")
301
+ if re.match(
302
+ r"3.*",
303
+ bigquery.__version__,
304
+ )
305
+ else bigquery.SchemaField(name="datetime_col", field_type="TIMESTAMP")
306
+ )
307
+ finally:
308
+ if tabular_dataset is not None:
309
+ tabular_dataset.delete()
310
+
311
+ def test_create_tabular_dataset_from_dataframe_with_provided_schema(
312
+ self, bigquery_dataset
313
+ ):
314
+ """Use the Dataset.create_from_dataframe() method to create a new tabular dataset,
315
+ passing in the optional `bq_schema` argument. Then confirm the dataset was successfully
316
+ created and references the BQ source."""
317
+
318
+ try:
319
+ bq_staging_table = f"bq://{_TEST_PROJECT}.{bigquery_dataset.dataset_id}.test_table{uuid.uuid4()}"
320
+
321
+ tabular_dataset = aiplatform.TabularDataset.create_from_dataframe(
322
+ df_source=_TEST_DATAFRAME,
323
+ staging_path=bq_staging_table,
324
+ display_name=self._make_display_name(
325
+ key="create_and_import_dataset_from_dataframe"
326
+ ),
327
+ bq_schema=_TEST_DATAFRAME_BQ_SCHEMA,
328
+ )
329
+
330
+ gapic_metadata = tabular_dataset.to_dict()["metadata"]
331
+ bq_source = gapic_metadata["inputConfig"]["bigquerySource"]["uri"]
332
+
333
+ assert bq_staging_table == bq_source
334
+ assert (
335
+ tabular_dataset.metadata_schema_uri
336
+ == aiplatform.schema.dataset.metadata.tabular
337
+ )
338
+ finally:
339
+ tabular_dataset.delete()
340
+
341
+ def test_create_time_series_dataset(self):
342
+ """Use the Dataset.create() method to create a new time series dataset.
343
+ Then confirm the dataset was successfully created and references GCS source."""
344
+
345
+ try:
346
+ time_series_dataset = aiplatform.TimeSeriesDataset.create(
347
+ display_name=self._make_display_name(key="create_time_series_dataset"),
348
+ bq_source=[_TEST_FORECASTING_BQ_SOURCE],
349
+ create_request_timeout=None,
350
+ )
351
+
352
+ gapic_metadata = time_series_dataset.to_dict()["metadata"]
353
+ bq_source_uri = gapic_metadata["inputConfig"]["bigquerySource"]["uri"]
354
+
355
+ assert _TEST_FORECASTING_BQ_SOURCE == bq_source_uri
356
+ assert (
357
+ time_series_dataset.metadata_schema_uri
358
+ == aiplatform.schema.dataset.metadata.time_series
359
+ )
360
+
361
+ finally:
362
+ if time_series_dataset is not None:
363
+ time_series_dataset.delete()
364
+
365
+ def test_export_data(self, storage_client, staging_bucket):
366
+ """Get an existing dataset, export data to a newly created folder in
367
+ Google Cloud Storage, then verify data was successfully exported."""
368
+
369
+ dataset = aiplatform.TextDataset(dataset_name=_TEST_TEXT_DATASET_ID)
370
+
371
+ exported_files = dataset.export_data(output_dir=f"gs://{staging_bucket.name}")
372
+
373
+ assert len(exported_files) # Ensure at least one GCS path was returned
374
+
375
+ exported_file = exported_files[0]
376
+ bucket, prefix = utils.extract_bucket_and_prefix_from_gcs_path(exported_file)
377
+
378
+ bucket = storage_client.get_bucket(bucket)
379
+ blob = bucket.get_blob(prefix)
380
+
381
+ assert blob # Verify the returned GCS export path exists
382
+
383
    def test_export_data_for_custom_training(self, staging_bucket):
        """Get an existing dataset, export data to a newly created folder in
        Google Cloud Storage, then verify data was successfully exported."""

        # pylint: disable=protected-access
        # Custom training data export should be generic, hence using the base
        # _Dataset class here in test. In practice, users should be able to
        # use this function in any inherited classes of _Dataset.
        dataset = aiplatform.datasets._Dataset(dataset_name=_TEST_IMAGE_DATASET_ID)

        # Filter the export into train/validation/test by the ml_use label the
        # service attaches to each data item.
        split = {
            "training_filter": "labels.aiplatform.googleapis.com/ml_use=training",
            "validation_filter": "labels.aiplatform.googleapis.com/ml_use=validation",
            "test_filter": "labels.aiplatform.googleapis.com/ml_use=test",
        }

        export_data_response = dataset.export_data_for_custom_training(
            output_dir=f"gs://{staging_bucket.name}",
            annotation_schema_uri="gs://google-cloud-aiplatform/schema/dataset/annotation/image_classification_1.0.0.yaml",
            split=split,
        )

        # Ensure three output paths (training, validation and test) are provided
        assert len(export_data_response["exportedFiles"]) == 3
        # Ensure data stats are calculated and correct
        # (counts below match the known contents of the fixture dataset).
        assert int(export_data_response["dataStats"]["trainingDataItemsCount"]) == 40
        assert int(export_data_response["dataStats"]["validationDataItemsCount"]) == 5
        assert int(export_data_response["dataStats"]["testDataItemsCount"]) == 5
        assert int(export_data_response["dataStats"]["trainingAnnotationsCount"]) == 40
        assert int(export_data_response["dataStats"]["validationAnnotationsCount"]) == 5
        assert int(export_data_response["dataStats"]["testAnnotationsCount"]) == 5
414
+
415
+ def test_update_dataset(self):
416
+ """Create a new dataset and use update() method to change its display_name, labels, and description.
417
+ Then confirm these fields of the dataset was successfully modifed."""
418
+
419
+ try:
420
+ dataset = aiplatform.ImageDataset.create()
421
+ labels = dataset.labels
422
+
423
+ dataset = dataset.update(
424
+ display_name=_TEST_DATASET_DISPLAY_NAME,
425
+ labels=_TEST_DATASET_LABELS,
426
+ description=_TEST_DATASET_DESCRIPTION,
427
+ update_request_timeout=None,
428
+ )
429
+ labels.update(_TEST_DATASET_LABELS)
430
+
431
+ assert dataset.display_name == _TEST_DATASET_DISPLAY_NAME
432
+ assert dataset.labels == labels
433
+ assert dataset.gca_resource.description == _TEST_DATASET_DESCRIPTION
434
+
435
+ finally:
436
+ dataset.delete()
testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_forecasting.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2022 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+
18
+ from google.cloud import aiplatform
19
+ from google.cloud.aiplatform import training_jobs
20
+
21
+ from google.cloud.aiplatform.compat.types import job_state
22
+ from google.cloud.aiplatform.compat.types import pipeline_state
23
+ import pytest
24
+ from tests.system.aiplatform import e2e_base
25
+
26
+ _TRAINING_DATASET_BQ_PATH = (
27
+ "bq://ucaip-sample-tests:ucaip_test_us_central1.2020_sales_train"
28
+ )
29
+ _PREDICTION_DATASET_BQ_PATH = (
30
+ "bq://ucaip-sample-tests:ucaip_test_us_central1.2021_sales_predict"
31
+ )
32
+
33
+
34
@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
class TestEndToEndForecasting1(e2e_base.TestEndToEnd):
    """End to end system test of the Vertex SDK with forecasting data."""

    _temp_prefix = "temp-vertex-sdk-e2e-forecasting"

    @pytest.mark.parametrize(
        "training_job",
        [
            training_jobs.AutoMLForecastingTrainingJob,
        ],
    )
    def test_end_to_end_forecasting(self, shared_state, training_job):
        """Builds a dataset, trains models, and gets batch predictions."""
        # Every created resource is appended here so the finally block can
        # clean up even after a partial failure.
        resources = []

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
        )
        try:
            # sync=False everywhere below: resources are created concurrently
            # and the explicit wait() calls at the end block until completion.
            ds = aiplatform.TimeSeriesDataset.create(
                display_name=self._make_display_name("dataset"),
                bq_source=[_TRAINING_DATASET_BQ_PATH],
                sync=False,
                create_request_timeout=180.0,
            )
            resources.append(ds)

            time_column = "date"
            time_series_identifier_column = "store_name"
            target_column = "sale_dollars"
            column_specs = {
                time_column: "timestamp",
                target_column: "numeric",
                "city": "categorical",
                "zip_code": "categorical",
                "county": "categorical",
            }

            job = training_job(
                display_name=self._make_display_name("train-housing-forecasting"),
                optimization_objective="minimize-rmse",
                column_specs=column_specs,
            )
            resources.append(job)

            model = job.run(
                dataset=ds,
                target_column=target_column,
                time_column=time_column,
                time_series_identifier_column=time_series_identifier_column,
                available_at_forecast_columns=[time_column],
                unavailable_at_forecast_columns=[target_column],
                time_series_attribute_columns=["city", "zip_code", "county"],
                forecast_horizon=30,
                context_window=30,
                data_granularity_unit="day",
                data_granularity_count=1,
                budget_milli_node_hours=1000,
                holiday_regions=["GLOBAL"],
                hierarchy_group_total_weight=1,
                window_stride_length=1,
                model_display_name=self._make_display_name("forecasting-liquor-model"),
                sync=False,
            )
            resources.append(model)

            batch_prediction_job = model.batch_predict(
                job_display_name=self._make_display_name("forecasting-liquor-model"),
                instances_format="bigquery",
                predictions_format="csv",
                machine_type="n1-standard-4",
                bigquery_source=_PREDICTION_DATASET_BQ_PATH,
                gcs_destination_prefix=(
                    f'gs://{shared_state["staging_bucket_name"]}/bp_results/'
                ),
                sync=False,
            )
            resources.append(batch_prediction_job)

            # Block until the async pipeline finishes, then assert success.
            batch_prediction_job.wait()
            model.wait()
            assert job.state == pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
            assert batch_prediction_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
        finally:
            for resource in resources:
                resource.delete()
123
+
124
+
125
@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
class TestEndToEndForecasting2(e2e_base.TestEndToEnd):
    """End to end system test of the Vertex SDK with forecasting data."""

    _temp_prefix = "temp-vertex-sdk-e2e-forecasting"

    @pytest.mark.parametrize(
        "training_job",
        [
            training_jobs.SequenceToSequencePlusForecastingTrainingJob,
        ],
    )
    def test_end_to_end_forecasting(self, shared_state, training_job):
        """Builds a dataset, trains models, and gets batch predictions."""
        # Same flow as TestEndToEndForecasting1, exercising the
        # Seq2Seq+ forecasting trainer instead of AutoML.
        resources = []

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
        )
        try:
            ds = aiplatform.TimeSeriesDataset.create(
                display_name=self._make_display_name("dataset"),
                bq_source=[_TRAINING_DATASET_BQ_PATH],
                sync=False,
                create_request_timeout=180.0,
            )
            resources.append(ds)

            time_column = "date"
            time_series_identifier_column = "store_name"
            target_column = "sale_dollars"
            column_specs = {
                time_column: "timestamp",
                target_column: "numeric",
                "city": "categorical",
                "zip_code": "categorical",
                "county": "categorical",
            }

            job = training_job(
                display_name=self._make_display_name("train-housing-forecasting"),
                optimization_objective="minimize-rmse",
                column_specs=column_specs,
            )
            resources.append(job)

            model = job.run(
                dataset=ds,
                target_column=target_column,
                time_column=time_column,
                time_series_identifier_column=time_series_identifier_column,
                available_at_forecast_columns=[time_column],
                unavailable_at_forecast_columns=[target_column],
                time_series_attribute_columns=["city", "zip_code", "county"],
                forecast_horizon=30,
                context_window=30,
                data_granularity_unit="day",
                data_granularity_count=1,
                budget_milli_node_hours=1000,
                holiday_regions=["GLOBAL"],
                hierarchy_group_total_weight=1,
                window_stride_length=1,
                model_display_name=self._make_display_name("forecasting-liquor-model"),
                sync=False,
            )
            resources.append(model)

            batch_prediction_job = model.batch_predict(
                job_display_name=self._make_display_name("forecasting-liquor-model"),
                instances_format="bigquery",
                predictions_format="csv",
                machine_type="n1-standard-4",
                bigquery_source=_PREDICTION_DATASET_BQ_PATH,
                gcs_destination_prefix=(
                    f'gs://{shared_state["staging_bucket_name"]}/bp_results/'
                ),
                sync=False,
            )
            resources.append(batch_prediction_job)

            # Block until the async pipeline finishes, then assert success.
            batch_prediction_job.wait()
            model.wait()
            assert job.state == pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
            assert batch_prediction_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
        finally:
            for resource in resources:
                resource.delete()
214
+
215
+
216
@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
class TestEndToEndForecasting3(e2e_base.TestEndToEnd):
    """End to end system test of the Vertex SDK with forecasting data."""

    _temp_prefix = "temp-vertex-sdk-e2e-forecasting"

    @pytest.mark.parametrize(
        "training_job",
        [
            training_jobs.TemporalFusionTransformerForecastingTrainingJob,
        ],
    )
    def test_end_to_end_forecasting(self, shared_state, training_job):
        """Builds a dataset, trains models, and gets batch predictions."""
        # Same flow as TestEndToEndForecasting1, exercising the Temporal
        # Fusion Transformer forecasting trainer.
        resources = []

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
        )
        try:
            ds = aiplatform.TimeSeriesDataset.create(
                display_name=self._make_display_name("dataset"),
                bq_source=[_TRAINING_DATASET_BQ_PATH],
                sync=False,
                create_request_timeout=180.0,
            )
            resources.append(ds)

            time_column = "date"
            time_series_identifier_column = "store_name"
            target_column = "sale_dollars"
            column_specs = {
                time_column: "timestamp",
                target_column: "numeric",
                "city": "categorical",
                "zip_code": "categorical",
                "county": "categorical",
            }

            job = training_job(
                display_name=self._make_display_name("train-housing-forecasting"),
                optimization_objective="minimize-rmse",
                column_specs=column_specs,
            )
            resources.append(job)

            model = job.run(
                dataset=ds,
                target_column=target_column,
                time_column=time_column,
                time_series_identifier_column=time_series_identifier_column,
                available_at_forecast_columns=[time_column],
                unavailable_at_forecast_columns=[target_column],
                time_series_attribute_columns=["city", "zip_code", "county"],
                forecast_horizon=30,
                context_window=30,
                data_granularity_unit="day",
                data_granularity_count=1,
                budget_milli_node_hours=1000,
                holiday_regions=["GLOBAL"],
                hierarchy_group_total_weight=1,
                window_stride_length=1,
                model_display_name=self._make_display_name("forecasting-liquor-model"),
                sync=False,
            )
            resources.append(model)

            batch_prediction_job = model.batch_predict(
                job_display_name=self._make_display_name("forecasting-liquor-model"),
                instances_format="bigquery",
                predictions_format="csv",
                machine_type="n1-standard-4",
                bigquery_source=_PREDICTION_DATASET_BQ_PATH,
                gcs_destination_prefix=(
                    f'gs://{shared_state["staging_bucket_name"]}/bp_results/'
                ),
                sync=False,
            )
            resources.append(batch_prediction_job)

            # Block until the async pipeline finishes, then assert success.
            batch_prediction_job.wait()
            model.wait()
            assert job.state == pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
            assert batch_prediction_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
        finally:
            for resource in resources:
                resource.delete()
305
+
306
+
307
@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
class TestEndToEndForecasting4(e2e_base.TestEndToEnd):
    """End to end system test of the Vertex SDK with forecasting data."""

    _temp_prefix = "temp-vertex-sdk-e2e-forecasting"

    @pytest.mark.parametrize(
        "training_job",
        [
            training_jobs.TimeSeriesDenseEncoderForecastingTrainingJob,
        ],
    )
    def test_end_to_end_forecasting(self, shared_state, training_job):
        """Builds a dataset, trains models, and gets batch predictions."""
        # Same flow as TestEndToEndForecasting1, exercising the Time-series
        # Dense Encoder (TiDE) forecasting trainer.
        resources = []

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
        )
        try:
            ds = aiplatform.TimeSeriesDataset.create(
                display_name=self._make_display_name("dataset"),
                bq_source=[_TRAINING_DATASET_BQ_PATH],
                sync=False,
                create_request_timeout=180.0,
            )
            resources.append(ds)

            time_column = "date"
            time_series_identifier_column = "store_name"
            target_column = "sale_dollars"
            column_specs = {
                time_column: "timestamp",
                target_column: "numeric",
                "city": "categorical",
                "zip_code": "categorical",
                "county": "categorical",
            }

            job = training_job(
                display_name=self._make_display_name("train-housing-forecasting"),
                optimization_objective="minimize-rmse",
                column_specs=column_specs,
            )
            resources.append(job)

            model = job.run(
                dataset=ds,
                target_column=target_column,
                time_column=time_column,
                time_series_identifier_column=time_series_identifier_column,
                available_at_forecast_columns=[time_column],
                unavailable_at_forecast_columns=[target_column],
                time_series_attribute_columns=["city", "zip_code", "county"],
                forecast_horizon=30,
                context_window=30,
                data_granularity_unit="day",
                data_granularity_count=1,
                budget_milli_node_hours=1000,
                holiday_regions=["GLOBAL"],
                hierarchy_group_total_weight=1,
                window_stride_length=1,
                model_display_name=self._make_display_name("forecasting-liquor-model"),
                sync=False,
            )
            resources.append(model)

            batch_prediction_job = model.batch_predict(
                job_display_name=self._make_display_name("forecasting-liquor-model"),
                instances_format="bigquery",
                predictions_format="csv",
                machine_type="n1-standard-4",
                bigquery_source=_PREDICTION_DATASET_BQ_PATH,
                gcs_destination_prefix=(
                    f'gs://{shared_state["staging_bucket_name"]}/bp_results/'
                ),
                sync=False,
            )
            resources.append(batch_prediction_job)

            # Block until the async pipeline finishes, then assert success.
            batch_prediction_job.wait()
            model.wait()
            assert job.state == pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
            assert batch_prediction_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
        finally:
            for resource in resources:
                resource.delete()
testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_metadata_schema.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2022 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+ import json
18
+
19
+ import pytest
20
+
21
+ from google.cloud import aiplatform
22
+ from google.cloud.aiplatform.metadata.schema.google import (
23
+ artifact_schema as google_artifact_schema,
24
+ )
25
+ from google.cloud.aiplatform.metadata.schema.system import (
26
+ artifact_schema as system_artifact_schema,
27
+ )
28
+ from google.cloud.aiplatform.metadata.schema.system import (
29
+ execution_schema as system_execution_schema,
30
+ )
31
+ from tests.system.aiplatform import e2e_base
32
+
33
+
34
@pytest.mark.usefixtures("tear_down_resources")
class TestMetadataSchema(e2e_base.TestEndToEnd):
    """System tests for the ML Metadata schema classes (artifacts/executions)."""

    _temp_prefix = "tmpvrtxmlmdsdk-e2e"

    def setup_class(cls):
        # Truncating the name because of resource id constraints from the service
        cls.artifact_display_name = cls._make_display_name("base-artifact")[:30]
        cls.artifact_id = cls._make_display_name("base-artifact-id")[:30]
        cls.artifact_uri = cls._make_display_name("base-uri")
        cls.artifact_metadata = {"test_property": "test_value"}
        cls.artifact_description = cls._make_display_name("base-description")
        cls.execution_display_name = cls._make_display_name("base-execution")[:30]
        cls.execution_description = cls._make_display_name("base-description")

    def test_system_dataset_artifact_create(self, shared_state):
        """Create a system.Dataset artifact and verify its stored fields."""

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        artifact = system_artifact_schema.Dataset(
            display_name=self.artifact_display_name,
            uri=self.artifact_uri,
            metadata=self.artifact_metadata,
            description=self.artifact_description,
        ).create()

        # Bug fix: use setdefault+append instead of assigning a fresh list, so
        # the tests no longer depend on execution order — previously the later
        # tests' `shared_state["resources"].append(...)` raised KeyError when
        # this test was deselected (e.g. with `pytest -k`).
        shared_state.setdefault("resources", []).append(artifact)

        assert artifact.display_name == self.artifact_display_name
        assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
            self.artifact_metadata, sort_keys=True
        )
        assert artifact.schema_title == "system.Dataset"
        assert artifact.description == self.artifact_description
        assert "/metadataStores/default/artifacts/" in artifact.resource_name

    def test_google_dataset_artifact_create(self, shared_state):
        """Create a google.VertexDataset artifact and verify derived fields."""

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        vertex_dataset_name = f"projects/{e2e_base._PROJECT}/locations/{e2e_base._LOCATION}/datasets/dataset"
        artifact = google_artifact_schema.VertexDataset(
            vertex_dataset_name=vertex_dataset_name,
            display_name=self.artifact_display_name,
            metadata=self.artifact_metadata,
            description=self.artifact_description,
        ).create()

        shared_state.setdefault("resources", []).append(artifact)

        # The schema class injects the referenced resource name into metadata.
        expected_metadata = self.artifact_metadata.copy()
        expected_metadata["resourceName"] = vertex_dataset_name

        assert artifact.display_name == self.artifact_display_name
        assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
            expected_metadata, sort_keys=True
        )
        assert artifact.schema_title == "google.VertexDataset"
        assert artifact.description == self.artifact_description
        assert "/metadataStores/default/artifacts/" in artifact.resource_name
        assert (
            artifact.uri
            == f"https://{e2e_base._LOCATION}-aiplatform.googleapis.com/v1/{vertex_dataset_name}"
        )

    def test_execution_create_using_system_schema_class(self, shared_state):
        """Create a system.CustomJobExecution and verify its stored fields."""

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        execution = system_execution_schema.CustomJobExecution(
            display_name=self.execution_display_name,
            description=self.execution_description,
        ).create()

        shared_state.setdefault("resources", []).append(execution)

        assert execution.display_name == self.execution_display_name
        assert execution.schema_title == "system.CustomJobExecution"
        assert execution.description == self.execution_description
        assert "/metadataStores/default/executions/" in execution.resource_name
testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_tabular.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2023 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+
18
+ import os
19
+
20
+ import pytest
21
+
22
+ from google.cloud import storage
23
+
24
+ from google.cloud import aiplatform
25
+ from google.cloud.aiplatform.compat.types import (
26
+ job_state as gca_job_state,
27
+ pipeline_state as gca_pipeline_state,
28
+ )
29
+ from tests.system.aiplatform import e2e_base
30
+
31
+
32
+ _DATASET_TRAINING_SRC = "gs://cloud-samples-data-us-central1/vertex-ai/structured_data/california_housing/california-housing-data.csv"
33
+ _DATASET_BATCH_PREDICT_SRC = "gs://cloud-samples-data-us-central1/vertex-ai/batch-prediction/california_housing_batch_predict.jsonl"
34
+ _DIR_NAME = os.path.dirname(os.path.abspath(__file__))
35
+ _LOCAL_TRAINING_SCRIPT_PATH = os.path.join(
36
+ _DIR_NAME, "test_resources/california_housing_training_script.py"
37
+ )
38
+ _INSTANCE = {
39
+ "longitude": -124.35,
40
+ "latitude": 40.54,
41
+ "housing_median_age": 52.0,
42
+ "total_rooms": 1820.0,
43
+ "total_bedrooms": 300.0,
44
+ "population": 806,
45
+ "households": 270.0,
46
+ "median_income": 3.014700,
47
+ }
48
+
49
+
50
@pytest.mark.usefixtures(
    "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources"
)
class TestEndToEndTabular(e2e_base.TestEndToEnd):
    """End to end system test of the Vertex SDK with tabular data adapted from
    reference notebook http://shortn/_eyoNx3SN0X"""

    _temp_prefix = "temp-vertex-sdk-e2e-tabular"

    def test_end_to_end_tabular(self, shared_state):
        """Build dataset, train a custom and AutoML model, deploy, and get predictions.

        Args:
            shared_state: Fixture-provided dict shared across the test session;
                ``resources`` collects every created resource for teardown and
                ``staging_bucket_name`` names the GCS staging bucket.
        """

        # Collection of resources generated by this test, to be deleted during teardown
        shared_state["resources"] = []

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
        )

        # Create and import to single managed dataset for both training jobs

        ds = aiplatform.TabularDataset.create(
            display_name=self._make_display_name("dataset"),
            gcs_source=[_DATASET_TRAINING_SRC],
            sync=False,
            create_request_timeout=180.0,
        )

        shared_state["resources"].extend([ds])

        # Define both training jobs

        custom_job = aiplatform.CustomTrainingJob(
            display_name=self._make_display_name("train-housing-custom"),
            script_path=_LOCAL_TRAINING_SCRIPT_PATH,
            container_uri="gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest",
            requirements=["gcsfs==0.7.1"],
            model_serving_container_image_uri="gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest",
        )

        automl_job = aiplatform.AutoMLTabularTrainingJob(
            display_name=self._make_display_name("train-housing-automl"),
            optimization_prediction_type="regression",
            optimization_objective="minimize-rmse",
        )

        # Kick off both training jobs, AutoML job will take approx one hour to run

        custom_model = custom_job.run(
            ds,
            replica_count=1,
            model_display_name=self._make_display_name("custom-housing-model"),
            timeout=1234,
            restart_job_on_worker_restart=True,
            enable_web_access=True,
            sync=False,
            create_request_timeout=None,
            disable_retries=True,
        )

        automl_model = automl_job.run(
            dataset=ds,
            target_column="median_house_value",
            model_display_name=self._make_display_name("automl-housing-model"),
            sync=False,
        )

        shared_state["resources"].extend(
            [automl_job, automl_model, custom_job, custom_model]
        )

        # Deploy both models after training completes
        custom_endpoint = custom_model.deploy(machine_type="n1-standard-4", sync=False)
        automl_endpoint = automl_model.deploy(machine_type="n1-standard-4", sync=False)
        shared_state["resources"].extend([automl_endpoint, custom_endpoint])

        custom_batch_prediction_job = custom_model.batch_predict(
            job_display_name=self._make_display_name("custom-housing-model"),
            instances_format="jsonl",
            machine_type="n1-standard-4",
            gcs_source=_DATASET_BATCH_PREDICT_SRC,
            gcs_destination_prefix=f'gs://{shared_state["staging_bucket_name"]}/bp_results/',
            sync=False,
        )

        shared_state["resources"].append(custom_batch_prediction_job)

        # Sampled before the job finishes so we can assert done() is False here
        # and True after completion (checked at the bottom of the test).
        in_progress_done_check = custom_job.done()
        custom_job.wait_for_resource_creation()

        automl_job.wait_for_resource_creation()
        # custom_batch_prediction_job.wait_for_resource_creation()

        # Send online prediction with same instance to both deployed models
        # This sample is taken from an observation where median_house_value = 94600
        custom_endpoint.wait()

        # Check scheduling is correctly set
        assert (
            custom_job._gca_resource.training_task_inputs["scheduling"]["timeout"]
            == "1234s"
        )
        assert (
            custom_job._gca_resource.training_task_inputs["scheduling"][
                "restartJobOnWorkerRestart"
            ]
            is True
        )

        custom_prediction = custom_endpoint.predict([_INSTANCE], timeout=180.0)

        custom_batch_prediction_job.wait()

        automl_endpoint.wait()
        automl_prediction = automl_endpoint.predict(
            [{k: str(v) for k, v in _INSTANCE.items()}],  # Cast int values to strings
            timeout=180.0,
        )

        # Test lazy loading of Endpoint, check getter was never called after predict()
        custom_endpoint = aiplatform.Endpoint(custom_endpoint.resource_name)
        custom_endpoint.predict([_INSTANCE])

        completion_done_check = custom_job.done()
        assert custom_endpoint._skipped_getter_call()

        assert (
            custom_job.state
            == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
        )
        assert (
            automl_job.state
            == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
        )
        assert (
            custom_batch_prediction_job.state
            == gca_job_state.JobState.JOB_STATE_SUCCEEDED
        )

        # Ensure batch prediction errors output file is empty
        batch_predict_gcs_output_path = (
            custom_batch_prediction_job.output_info.gcs_output_directory
        )
        client = storage.Client()

        for blob in client.list_blobs(
            bucket_or_name=shared_state["staging_bucket_name"],
            prefix=f"bp_results/{batch_predict_gcs_output_path.split('/')[-1]}",
        ):
            # There are always 2 files in this output path: 1 with errors, 1 with predictions
            if "errors" in blob.name:
                # download_as_bytes() is the non-deprecated spelling of
                # download_as_string().
                error_output_filestr = blob.download_as_bytes().decode()
                assert not error_output_filestr

        # Ensure a single prediction was returned
        assert len(custom_prediction.predictions) == 1
        assert len(automl_prediction.predictions) == 1

        # Ensure the models are remotely accurate
        try:
            automl_result = automl_prediction.predictions[0]["value"]
            custom_result = custom_prediction.predictions[0][0]
            assert 200000 > automl_result > 50000
            assert 200000 > custom_result > 50000
        except (KeyError, IndexError, TypeError) as e:
            # Chain the original error so the real cause is preserved; also
            # widen the guard: indexing predictions[0][0] can raise IndexError
            # or TypeError for the same "unexpected structure" condition.
            raise RuntimeError(
                f"Unexpected prediction response structure: {e}"
            ) from e

        # Check done() method works correctly
        assert in_progress_done_check is False
        assert completion_done_check is True
testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_experiments.py ADDED
@@ -0,0 +1,769 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2023 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+ import tempfile
18
+
19
+ import uuid
20
+ import pytest
21
+
22
+ from google.api_core import exceptions
23
+ from google.cloud import storage
24
+
25
+ from google.cloud import aiplatform
26
+ from google.cloud.aiplatform.utils import rest_utils
27
+ from google.cloud.aiplatform.metadata.schema.google import (
28
+ artifact_schema as google_artifact_schema,
29
+ )
30
+ from tests.system.aiplatform import e2e_base
31
+ from tests.system.aiplatform import test_model_upload
32
+
33
+ import numpy as np
34
+ import sklearn
35
+ from sklearn.linear_model import LinearRegression
36
+
37
+
38
+ _RUN = "run-1"
39
+ _PARAMS = {"sdk-param-test-1": 0.1, "sdk-param-test-2": 0.2}
40
+ _METRICS = {"sdk-metric-test-1": 0.8, "sdk-metric-test-2": 100.0}
41
+
42
+ _RUN_2 = "run-2"
43
+ _PARAMS_2 = {"sdk-param-test-1": 0.2, "sdk-param-test-2": 0.4}
44
+ _METRICS_2 = {"sdk-metric-test-1": 1.6, "sdk-metric-test-2": 200.0}
45
+
46
+ _READ_TIME_SERIES_BATCH_SIZE = 20
47
+
48
+ _TIME_SERIES_METRIC_KEY = "accuracy"
49
+
50
+ _CLASSIFICATION_METRICS = {
51
+ "display_name": "my-classification-metrics",
52
+ "labels": ["cat", "dog"],
53
+ "matrix": [[9, 1], [1, 9]],
54
+ "fpr": [0.1, 0.5, 0.9],
55
+ "tpr": [0.1, 0.7, 0.9],
56
+ "threshold": [0.9, 0.5, 0.1],
57
+ }
58
+
59
+
60
@pytest.mark.usefixtures(
    "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources"
)
class TestExperiments(e2e_base.TestEndToEnd):
    """System tests for the Vertex AI Experiments SDK surface.

    NOTE(review): the tests in this class are order-dependent — earlier
    tests create the experiment, runs, artifacts and pipeline job that
    later tests read, modify and finally delete. They must run in
    definition order within a single session.
    """

    _temp_prefix = "tmpvrtxsdk-e2e"

    def setup_class(cls):
        """Generate the unique resource names shared by all tests in the class."""
        # Names are truncated to 64 chars to satisfy service resource-id limits.
        cls._experiment_name = cls._make_display_name("")[:64]
        cls._experiment_name_2 = cls._make_display_name("")[:64]
        cls._experiment_model_name = cls._make_display_name("sklearn-model")[:64]
        cls._dataset_artifact_name = cls._make_display_name("")[:64]
        cls._dataset_artifact_uri = cls._make_display_name("ds-uri")
        cls._pipeline_job_id = cls._make_display_name("job-id")

    def test_create_experiment(self, shared_state):
        """Create a Tensorboard-backed experiment via aiplatform.init()."""

        # Truncating the name because of resource id constraints from the service
        tensorboard = aiplatform.Tensorboard.create(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            display_name=self._experiment_name,
        )

        shared_state["resources"] = [tensorboard]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
            experiment_tensorboard=tensorboard,
        )

        # Register the experiment itself for teardown.
        shared_state["resources"].append(
            aiplatform.metadata.metadata._experiment_tracker.experiment
        )

    def test_get_experiment(self):
        """The experiment created above is retrievable by name."""
        experiment = aiplatform.Experiment(
            experiment_name=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        assert experiment.name == self._experiment_name

    def test_start_run(self):
        """start_run() creates run-1 inside the experiment."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        run = aiplatform.start_run(_RUN)
        assert run.name == _RUN

    def test_get_run(self):
        """run-1 is retrievable and still in the RUNNING state."""
        run = aiplatform.ExperimentRun(
            run_name=_RUN,
            experiment=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        assert run.name == _RUN
        assert run.state == aiplatform.gapic.Execution.State.RUNNING

    def test_log_params(self):
        """log_params() round-trips through get_params()."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN, resume=True)
        aiplatform.log_params(_PARAMS)
        run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
        assert run.get_params() == _PARAMS

    def test_log_metrics(self):
        """log_metrics() round-trips through get_metrics()."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN, resume=True)
        aiplatform.log_metrics(_METRICS)
        run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
        assert run.get_metrics() == _METRICS

    def test_log_time_series_metrics(self):
        """Five time-series points appear as steps 1..5 in the data frame."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )

        aiplatform.start_run(_RUN, resume=True)

        for i in range(5):
            aiplatform.log_time_series_metrics({_TIME_SERIES_METRIC_KEY: i})

        run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)

        time_series_result = run.get_time_series_data_frame()[
            [_TIME_SERIES_METRIC_KEY, "step"]
        ].to_dict("list")

        # Steps are 1-indexed by the service; values come back as floats.
        assert time_series_result == {
            "step": list(range(1, 6)),
            _TIME_SERIES_METRIC_KEY: [float(value) for value in range(5)],
        }

    def test_get_time_series_data_frame_batch_read_success(self, shared_state):
        """Reading more metrics than one batch holds still returns every row."""
        tensorboard = aiplatform.Tensorboard.create(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            display_name=self._experiment_name_2,
        )
        shared_state["resources"] = [tensorboard]
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name_2,
            experiment_tensorboard=tensorboard,
        )
        shared_state["resources"].append(
            aiplatform.metadata.metadata._experiment_tracker.experiment
        )
        aiplatform.start_run(_RUN)
        # One more metric than the batch size forces a second read batch.
        for i in range(_READ_TIME_SERIES_BATCH_SIZE + 1):
            aiplatform.log_time_series_metrics({f"{_TIME_SERIES_METRIC_KEY}-{i}": 1})

        run = aiplatform.ExperimentRun(
            run_name=_RUN, experiment=self._experiment_name_2
        )
        time_series_result = run.get_time_series_data_frame()

        assert len(time_series_result) > _READ_TIME_SERIES_BATCH_SIZE

    def test_log_classification_metrics(self, shared_state):
        """log_classification_metrics() round-trips minus the generated id."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN, resume=True)
        classification_metrics = aiplatform.log_classification_metrics(
            display_name=_CLASSIFICATION_METRICS["display_name"],
            labels=_CLASSIFICATION_METRICS["labels"],
            matrix=_CLASSIFICATION_METRICS["matrix"],
            fpr=_CLASSIFICATION_METRICS["fpr"],
            tpr=_CLASSIFICATION_METRICS["tpr"],
            threshold=_CLASSIFICATION_METRICS["threshold"],
        )

        run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
        metrics = run.get_classification_metrics()[0]
        # Pop the generated artifact id so the remainder compares equal to
        # the constant payload; delete the artifact at the end.
        metric_artifact = aiplatform.Artifact(metrics.pop("id"))
        assert metrics == _CLASSIFICATION_METRICS
        assert isinstance(
            classification_metrics, google_artifact_schema.ClassificationMetrics
        )
        metric_artifact.delete()

    def test_log_model(self, shared_state):
        """log_model() saves an sklearn model and its metadata round-trips."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN, resume=True)

        # Tiny deterministic regression fit: y = x0 + 2*x1 + 3.
        train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
        train_y = np.dot(train_x, np.array([1, 2])) + 3
        model = LinearRegression()
        model.fit(train_x, train_y)

        model_artifact = aiplatform.log_model(
            model=model,
            artifact_id=self._experiment_model_name,
            uri=f"gs://{shared_state['staging_bucket_name']}/sklearn-model",
            input_example=train_x,
        )
        shared_state["resources"].append(model_artifact)

        run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
        experiment_model = run.get_experiment_models()[0]
        assert "sklearn-model" in experiment_model.name
        assert (
            experiment_model.uri
            == f"gs://{shared_state['staging_bucket_name']}/sklearn-model"
        )
        assert experiment_model.get_model_info() == {
            "model_class": "sklearn.linear_model._base.LinearRegression",
            "framework_name": "sklearn",
            "framework_version": sklearn.__version__,
            "input_example": {
                "type": "numpy.ndarray",
                "data": train_x.tolist(),
            },
        }
        experiment_model.delete()

    def test_create_artifact(self, shared_state):
        """Create the system.Dataset artifact used by later lineage tests."""
        ds = aiplatform.Artifact.create(
            schema_title="system.Dataset",
            resource_id=self._dataset_artifact_name,
            uri=self._dataset_artifact_uri,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        shared_state["resources"].append(ds)
        assert ds.uri == self._dataset_artifact_uri

    def test_get_artifact_by_uri(self):
        """The dataset artifact is retrievable by its URI."""
        ds = aiplatform.Artifact.get_with_uri(
            uri=self._dataset_artifact_uri,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        assert ds.uri == self._dataset_artifact_uri
        assert ds.name == self._dataset_artifact_name

    def test_log_execution_and_artifact(self, shared_state):
        """Record an execution with input/output artifacts and verify lineage."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN, resume=True)

        with aiplatform.start_execution(
            schema_title="system.ContainerExecution",
            resource_id=self._make_display_name("execution"),
        ) as execution:

            shared_state["resources"].append(execution)

            ds = aiplatform.Artifact(
                artifact_name=self._dataset_artifact_name,
            )
            execution.assign_input_artifacts([ds])

            model = aiplatform.Artifact.create(schema_title="system.Model")
            shared_state["resources"].append(model)

            # Download a known XGBoost model file and upload it as a Vertex
            # Model so a google.VertexModel artifact is produced as output.
            storage_client = storage.Client(project=e2e_base._PROJECT)
            model_blob = storage.Blob.from_string(
                uri=test_model_upload._XGBOOST_MODEL_URI, client=storage_client
            )
            model_path = tempfile.mktemp() + ".my_model.xgb"
            model_blob.download_to_filename(filename=model_path)

            vertex_model = aiplatform.Model.upload_xgboost_model_file(
                display_name=self._make_display_name("model"),
                model_file_path=model_path,
            )
            shared_state["resources"].append(vertex_model)

            execution.assign_output_artifacts([model, vertex_model])

        input_artifacts = execution.get_input_artifacts()
        assert input_artifacts[0].name == ds.name

        output_artifacts = execution.get_output_artifacts()
        # system.Model, google.VertexModel
        output_artifacts.sort(key=lambda artifact: artifact.schema_title, reverse=True)

        shared_state["resources"].append(output_artifacts[-1])

        assert output_artifacts[0].name == model.name
        assert output_artifacts[1].uri == rest_utils.make_gcp_resource_rest_url(
            resource=vertex_model
        )

        run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
        executions = run.get_executions()
        assert executions[0].name == execution.name

        artifacts = run.get_artifacts()

        # system.Model, system.Dataset, google.VertexTensorboardRun, google.VertexModel
        artifacts.sort(key=lambda artifact: artifact.schema_title, reverse=True)
        assert artifacts.pop().uri == rest_utils.make_gcp_resource_rest_url(
            resource=vertex_model
        )

        # tensorboard run artifact is also included
        assert sorted([artifact.name for artifact in artifacts]) == sorted(
            [ds.name, model.name, run._tensorboard_run_id(run.resource_id)]
        )

    def test_end_run(self):
        """end_run() transitions run-1 to COMPLETE."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN, resume=True)
        aiplatform.end_run()
        run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
        assert run.state == aiplatform.gapic.Execution.State.COMPLETE

    def test_run_context_manager(self):
        """start_run() as a context manager completes the run on exit."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        with aiplatform.start_run(_RUN_2) as run:
            run.log_params(_PARAMS_2)
            run.log_metrics(_METRICS_2)
            assert run.state == aiplatform.gapic.Execution.State.RUNNING

        assert run.state == aiplatform.gapic.Execution.State.COMPLETE

    def test_add_pipeline_job_to_experiment(self, shared_state):
        """A PipelineJob submitted with experiment= is associated with it."""
        # kfp is imported lazily so the rest of the module does not require it.
        import kfp.v2.dsl as dsl
        import kfp.v2.compiler as compiler
        from kfp.v2.dsl import component, Metrics, Output

        @component
        def trainer(
            learning_rate: float, dropout_rate: float, metrics: Output[Metrics]
        ):
            # Fixed metric values; asserted later via get_experiment_df().
            metrics.log_metric("accuracy", 0.8)
            metrics.log_metric("mse", 1.2)

        @dsl.pipeline(name=self._make_display_name("pipeline"))
        def pipeline(learning_rate: float, dropout_rate: float):
            trainer(learning_rate=learning_rate, dropout_rate=dropout_rate)

        compiler.Compiler().compile(
            pipeline_func=pipeline, package_path="pipeline.json"
        )

        job = aiplatform.PipelineJob(
            display_name=self._make_display_name("experiment pipeline job"),
            template_path="pipeline.json",
            job_id=self._pipeline_job_id,
            pipeline_root=f'gs://{shared_state["staging_bucket_name"]}',
            parameter_values={"learning_rate": 0.1, "dropout_rate": 0.2},
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        job.submit(
            experiment=self._experiment_name,
        )

        shared_state["resources"].append(job)

        job.wait()

        test_experiment = job.get_associated_experiment()

        assert test_experiment.name == self._experiment_name

    def test_get_experiments_df(self):
        """get_experiment_df() returns one row per run/pipeline with all columns."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )

        df = aiplatform.get_experiment_df()

        # Params/metrics produced by the pipeline run in the previous test.
        pipelines_param_and_metrics = {
            "param.dropout_rate": 0.2,
            "param.learning_rate": 0.1,
            "metric.accuracy": 0.8,
            "metric.mse": 1.2,
        }

        # Expected row for run-1.
        true_df_dict_1 = {f"metric.{key}": value for key, value in _METRICS.items()}
        for key, value in _PARAMS.items():
            true_df_dict_1[f"param.{key}"] = value

        true_df_dict_1["experiment_name"] = self._experiment_name
        true_df_dict_1["run_name"] = _RUN
        true_df_dict_1["state"] = aiplatform.gapic.Execution.State.COMPLETE.name
        true_df_dict_1["run_type"] = aiplatform.metadata.constants.SYSTEM_EXPERIMENT_RUN
        # Last logged time-series value for run-1 was step 5 / value 4.0.
        true_df_dict_1[f"time_series_metric.{_TIME_SERIES_METRIC_KEY}"] = 4.0

        # Expected row for run-2.
        true_df_dict_2 = {f"metric.{key}": value for key, value in _METRICS_2.items()}
        for key, value in _PARAMS_2.items():
            true_df_dict_2[f"param.{key}"] = value

        true_df_dict_2["experiment_name"] = self._experiment_name
        true_df_dict_2["run_name"] = _RUN_2
        true_df_dict_2["state"] = aiplatform.gapic.Execution.State.COMPLETE.name
        true_df_dict_2["run_type"] = aiplatform.metadata.constants.SYSTEM_EXPERIMENT_RUN
        true_df_dict_2[f"time_series_metric.{_TIME_SERIES_METRIC_KEY}"] = 0.0
        true_df_dict_2.update(pipelines_param_and_metrics)

        # Expected row for the pipeline run.
        true_df_dict_3 = {
            "experiment_name": self._experiment_name,
            "run_name": self._pipeline_job_id,
            "run_type": aiplatform.metadata.constants.SYSTEM_PIPELINE_RUN,
            "state": aiplatform.gapic.Execution.State.COMPLETE.name,
            "time_series_metric.accuracy": 0.0,
        }

        true_df_dict_3.update(pipelines_param_and_metrics)

        # Cells a row does not define come back as NaN; the comparison fills
        # them with 0.0 on both sides.
        for key in pipelines_param_and_metrics.keys():
            true_df_dict_1[key] = 0.0
            true_df_dict_2[key] = 0.0

        for key in _PARAMS.keys():
            true_df_dict_3[f"param.{key}"] = 0.0

        for key in _METRICS.keys():
            true_df_dict_3[f"metric.{key}"] = 0.0

        assert sorted(
            [true_df_dict_1, true_df_dict_2, true_df_dict_3],
            key=lambda d: d["run_name"],
        ) == sorted(df.fillna(0.0).to_dict("records"), key=lambda d: d["run_name"])

    def test_get_experiments_df_include_time_series_false(self):
        """Same as above but time-series columns are excluded from the frame."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )

        df = aiplatform.get_experiment_df(include_time_series=False)

        pipelines_param_and_metrics = {
            "param.dropout_rate": 0.2,
            "param.learning_rate": 0.1,
            "metric.accuracy": 0.8,
            "metric.mse": 1.2,
        }

        true_df_dict_1 = {f"metric.{key}": value for key, value in _METRICS.items()}
        for key, value in _PARAMS.items():
            true_df_dict_1[f"param.{key}"] = value

        true_df_dict_1["experiment_name"] = self._experiment_name
        true_df_dict_1["run_name"] = _RUN
        true_df_dict_1["state"] = aiplatform.gapic.Execution.State.COMPLETE.name
        true_df_dict_1["run_type"] = aiplatform.metadata.constants.SYSTEM_EXPERIMENT_RUN

        true_df_dict_2 = {f"metric.{key}": value for key, value in _METRICS_2.items()}
        for key, value in _PARAMS_2.items():
            true_df_dict_2[f"param.{key}"] = value

        true_df_dict_2["experiment_name"] = self._experiment_name
        true_df_dict_2["run_name"] = _RUN_2
        true_df_dict_2["state"] = aiplatform.gapic.Execution.State.COMPLETE.name
        true_df_dict_2["run_type"] = aiplatform.metadata.constants.SYSTEM_EXPERIMENT_RUN
        true_df_dict_2.update(pipelines_param_and_metrics)

        true_df_dict_3 = {
            "experiment_name": self._experiment_name,
            "run_name": self._pipeline_job_id,
            "run_type": aiplatform.metadata.constants.SYSTEM_PIPELINE_RUN,
            "state": aiplatform.gapic.Execution.State.COMPLETE.name,
        }

        true_df_dict_3.update(pipelines_param_and_metrics)

        for key in pipelines_param_and_metrics.keys():
            true_df_dict_1[key] = 0.0
            true_df_dict_2[key] = 0.0

        for key in _PARAMS.keys():
            true_df_dict_3[f"param.{key}"] = 0.0

        for key in _METRICS.keys():
            true_df_dict_3[f"metric.{key}"] = 0.0

        assert sorted(
            [true_df_dict_1, true_df_dict_2, true_df_dict_3],
            key=lambda d: d["run_name"],
        ) == sorted(df.fillna(0.0).to_dict("records"), key=lambda d: d["run_name"])

    def test_delete_run_does_not_exist_raises_exception(self):
        """Deleting run-1 makes subsequent lookup raise NotFound."""
        run = aiplatform.ExperimentRun(
            run_name=_RUN,
            experiment=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        run.delete(delete_backing_tensorboard_run=True)

        with pytest.raises(exceptions.NotFound):
            aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)

    def test_delete_run_success(self):
        """A freshly created run can be deleted along with its TB run."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN)
        run = aiplatform.ExperimentRun(
            run_name=_RUN,
            experiment=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        aiplatform.end_run()

        run.delete(delete_backing_tensorboard_run=True)

        with pytest.raises(exceptions.NotFound):
            aiplatform.ExperimentRun(
                run_name=_RUN,
                experiment=self._experiment_name,
                project=e2e_base._PROJECT,
                location=e2e_base._LOCATION,
            )

    def test_reuse_run_success(self):
        """A run name can be reused after full deletion (incl. TB run)."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN)
        run = aiplatform.ExperimentRun(
            run_name=_RUN,
            experiment=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        aiplatform.end_run()
        run.delete(delete_backing_tensorboard_run=True)

        aiplatform.start_run(_RUN)
        aiplatform.end_run()

        run = aiplatform.ExperimentRun(
            run_name=_RUN,
            experiment=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        assert run.name == _RUN

    def test_delete_run_then_tensorboard_success(self):
        """Run name is reusable after manually deleting the TB run + artifact."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN, resume=True)
        run = aiplatform.ExperimentRun(
            run_name=_RUN,
            experiment=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        aiplatform.end_run()
        run.delete()
        # Clean up the backing tensorboard run and its metadata artifact by
        # hand (run.delete() above did not, since the flag was omitted).
        tensorboard_run_artifact = aiplatform.metadata.artifact.Artifact(
            artifact_name=f"{self._experiment_name}-{_RUN}-tb-run"
        )
        tensorboard_run_resource = aiplatform.TensorboardRun(
            tensorboard_run_artifact.metadata["resourceName"]
        )
        tensorboard_run_resource.delete()
        tensorboard_run_artifact.delete()

        aiplatform.start_run(_RUN)
        aiplatform.end_run()

        run = aiplatform.ExperimentRun(
            run_name=_RUN,
            experiment=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        assert run.name == _RUN

    def test_delete_wout_backing_tensorboard_reuse_run_raises_exception(self):
        """Reusing a run name whose TB run still exists raises ValueError."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        aiplatform.start_run(_RUN, resume=True)
        run = aiplatform.ExperimentRun(
            run_name=_RUN,
            experiment=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        aiplatform.end_run()
        run.delete()

        with pytest.raises(ValueError):
            aiplatform.start_run(_RUN)

    def test_delete_experiment_does_not_exist_raises_exception(self):
        """Deleting the experiment makes subsequent lookup raise NotFound."""
        experiment = aiplatform.Experiment(
            experiment_name=self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        experiment.delete(delete_backing_tensorboard_runs=True)

        with pytest.raises(exceptions.NotFound):
            aiplatform.Experiment(experiment_name=self._experiment_name)

    def test_init_associates_global_tensorboard_to_experiment(self, shared_state):
        """A globally-set tensorboard is attached to later-created experiments."""

        tensorboard = aiplatform.Tensorboard.create(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            display_name=self._make_display_name("")[:64],
        )

        shared_state["resources"] = [tensorboard]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment_tensorboard=tensorboard,
        )

        assert (
            aiplatform.metadata.metadata._experiment_tracker._global_tensorboard
            == tensorboard
        )

        new_experiment_name = self._make_display_name("")[:64]
        new_experiment_resource = aiplatform.Experiment.create(
            experiment_name=new_experiment_name
        )

        shared_state["resources"].append(new_experiment_resource)

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=new_experiment_name,
        )

        assert (
            new_experiment_resource._lookup_backing_tensorboard().resource_name
            == tensorboard.resource_name
        )

        assert (
            new_experiment_resource._metadata_context.metadata.get(
                aiplatform.metadata.constants._BACKING_TENSORBOARD_RESOURCE_KEY
            )
            == tensorboard.resource_name
        )

    def test_get_backing_tensorboard_resource_returns_tensorboard(self, shared_state):
        """get_backing_tensorboard_resource() returns the attached tensorboard."""
        tensorboard = aiplatform.Tensorboard.create(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            display_name=self._make_display_name("")[:64],
        )
        shared_state["resources"] = [tensorboard]
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
            experiment_tensorboard=tensorboard,
        )
        experiment = aiplatform.Experiment(
            self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        assert (
            experiment.get_backing_tensorboard_resource().resource_name
            == tensorboard.resource_name
        )

    def test_get_backing_tensorboard_resource_returns_none(self):
        """An experiment created with experiment_tensorboard=False has no TB."""
        new_experiment_name = f"example-{uuid.uuid1()}"
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=new_experiment_name,
            experiment_tensorboard=False,
        )
        new_experiment = aiplatform.Experiment(
            new_experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        assert new_experiment.get_backing_tensorboard_resource() is None

    def test_delete_backing_tensorboard_experiment_run_success(self):
        """Runs can still be started after the backing tensorboard is deleted."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=self._experiment_name,
        )
        experiment = aiplatform.Experiment(
            self._experiment_name,
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )
        experiment.get_backing_tensorboard_resource().delete()
        run = aiplatform.start_run(_RUN)
        aiplatform.end_run()

        assert experiment.get_backing_tensorboard_resource() is None
        assert run.name == _RUN
testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_featurestore.py ADDED
@@ -0,0 +1,714 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2021 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+
18
+ import datetime
19
+ import logging
20
+ import pytest
21
+
22
+ from google.cloud import aiplatform
23
+ from tests.system.aiplatform import e2e_base
24
+
25
+ import pandas as pd
26
+
27
# Public sample dataset used to seed the "users" entity type (Avro format).
_TEST_USERS_ENTITY_TYPE_GCS_SRC = (
    "gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/users.avro"
)

# CSV of read instances (user, movie, timestamp) used by the batch-serve tests.
_TEST_READ_INSTANCE_SRC = "gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/movie_prediction.csv"

# Featurestore and entity-type identifiers shared across the test class.
_TEST_FEATURESTORE_ID = "movie_prediction"
_TEST_USER_ENTITY_TYPE_ID = "users"
_TEST_MOVIE_ENTITY_TYPE_ID = "movies"
# Labels applied when exercising EntityType.update().
_TEST_MOVIE_ENTITY_TYPE_UPDATE_LABELS = {"my_key_update": "my_value_update"}

# Feature ids on the "users" entity type.
_TEST_USER_AGE_FEATURE_ID = "age"
_TEST_USER_GENDER_FEATURE_ID = "gender"
_TEST_USER_LIKED_GENRES_FEATURE_ID = "liked_genres"

# Feature ids on the "movies" entity type.
_TEST_MOVIE_TITLE_FEATURE_ID = "title"
_TEST_MOVIE_GENRES_FEATURE_ID = "genres"
_TEST_MOVIE_AVERAGE_RATING_FEATURE_ID = "average_rating"
46
+
47
@pytest.mark.usefixtures(
    "prepare_staging_bucket",
    "delete_staging_bucket",
    "prepare_bigquery_dataset",
    "delete_bigquery_dataset",
    "tear_down_resources",
)
class TestFeaturestore(e2e_base.TestEndToEnd):
    """End-to-end tests for the Vertex AI Featurestore SDK surface.

    The tests in this class are order-dependent: earlier tests create the
    featurestore, entity types, and features that later tests ingest into,
    read from, and batch-serve. Created resources are handed between tests
    via the ``shared_state`` fixture dict and cleaned up by the
    ``tear_down_resources`` fixture.
    """

    _temp_prefix = "temp_vertex_sdk_e2e_featurestore_test"

    def test_create_get_list_featurestore(self, shared_state):
        """Create a featurestore, fetch it by resource name, and find it in list()."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        # Featurestore ids must be lowercase letters/digits/underscores.
        featurestore_id = self._make_display_name(key=_TEST_FEATURESTORE_ID).replace(
            "-", "_"
        )[:60]
        featurestore = aiplatform.Featurestore.create(
            featurestore_id=featurestore_id, online_store_fixed_node_count=1
        )

        # Register for teardown and hand off to subsequent tests.
        shared_state["resources"] = [featurestore]
        shared_state["featurestore"] = featurestore
        shared_state["featurestore_name"] = featurestore.resource_name

        get_featurestore = aiplatform.Featurestore(
            featurestore_name=featurestore.resource_name
        )
        assert featurestore.resource_name == get_featurestore.resource_name

        list_featurestores = aiplatform.Featurestore.list()
        assert get_featurestore.resource_name in [
            featurestore.resource_name for featurestore in list_featurestores
        ]

    def test_create_get_list_entity_types(self, shared_state):
        """Create user/movie entity types, fetch and list them, and update labels."""

        assert shared_state["featurestore"]
        assert shared_state["featurestore_name"]

        featurestore = shared_state["featurestore"]
        featurestore_name = shared_state["featurestore_name"]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        # Users: created via the parent Featurestore instance.
        user_entity_type = featurestore.create_entity_type(
            entity_type_id=_TEST_USER_ENTITY_TYPE_ID
        )
        shared_state["user_entity_type"] = user_entity_type
        shared_state["user_entity_type_name"] = user_entity_type.resource_name

        get_user_entity_type = featurestore.get_entity_type(
            entity_type_id=_TEST_USER_ENTITY_TYPE_ID
        )
        assert user_entity_type.resource_name == get_user_entity_type.resource_name

        # Movies: created via the EntityType class method (alternate path).
        movie_entity_type = aiplatform.EntityType.create(
            entity_type_id=_TEST_MOVIE_ENTITY_TYPE_ID,
            featurestore_name=featurestore_name,
        )
        shared_state["movie_entity_type"] = movie_entity_type
        shared_state["movie_entity_type_name"] = movie_entity_type.resource_name

        get_movie_entity_type = aiplatform.EntityType(
            entity_type_name=movie_entity_type.resource_name
        )
        assert movie_entity_type.resource_name == get_movie_entity_type.resource_name

        list_entity_types = aiplatform.EntityType.list(
            featurestore_name=featurestore_name
        )
        assert get_movie_entity_type.resource_name in [
            entity_type.resource_name for entity_type in list_entity_types
        ]

        # Update information about the movie entity type.
        assert movie_entity_type.labels != _TEST_MOVIE_ENTITY_TYPE_UPDATE_LABELS

        movie_entity_type.update(
            labels=_TEST_MOVIE_ENTITY_TYPE_UPDATE_LABELS,
        )

        assert movie_entity_type.labels == _TEST_MOVIE_ENTITY_TYPE_UPDATE_LABELS

    def test_create_get_list_features(self, shared_state):
        """Create user features via instance and class paths, then fetch/list them."""

        assert shared_state["user_entity_type"]
        assert shared_state["user_entity_type_name"]
        user_entity_type = shared_state["user_entity_type"]
        user_entity_type_name = shared_state["user_entity_type_name"]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        # User Features
        user_age_feature = user_entity_type.create_feature(
            feature_id=_TEST_USER_AGE_FEATURE_ID, value_type="INT64"
        )
        shared_state["user_age_feature_resource_name"] = user_age_feature.resource_name
        get_user_age_feature = user_entity_type.get_feature(
            feature_id=_TEST_USER_AGE_FEATURE_ID
        )
        assert user_age_feature.resource_name == get_user_age_feature.resource_name

        user_gender_feature = aiplatform.Feature.create(
            feature_id=_TEST_USER_GENDER_FEATURE_ID,
            value_type="STRING",
            entity_type_name=user_entity_type_name,
        )
        shared_state[
            "user_gender_feature_resource_name"
        ] = user_gender_feature.resource_name

        get_user_gender_feature = aiplatform.Feature(
            feature_name=user_gender_feature.resource_name
        )
        assert (
            user_gender_feature.resource_name == get_user_gender_feature.resource_name
        )

        user_liked_genres_feature = user_entity_type.create_feature(
            feature_id=_TEST_USER_LIKED_GENRES_FEATURE_ID,
            value_type="STRING_ARRAY",
        )
        shared_state[
            "user_liked_genres_feature_resource_name"
        ] = user_liked_genres_feature.resource_name

        get_user_liked_genres_feature = aiplatform.Feature(
            feature_name=user_liked_genres_feature.resource_name
        )
        assert (
            user_liked_genres_feature.resource_name
            == get_user_liked_genres_feature.resource_name
        )

        list_user_features = user_entity_type.list_features()
        list_user_feature_resource_names = [
            feature.resource_name for feature in list_user_features
        ]

        assert get_user_age_feature.resource_name in list_user_feature_resource_names
        assert get_user_gender_feature.resource_name in list_user_feature_resource_names
        assert (
            get_user_liked_genres_feature.resource_name
            in list_user_feature_resource_names
        )

    def test_ingest_feature_values(self, shared_state, caplog):
        """Ingest user feature values from a GCS Avro file and check the log output."""

        assert shared_state["user_entity_type"]
        user_entity_type = shared_state["user_entity_type"]

        # Capture the SDK's INFO logs to assert on the completion message.
        caplog.set_level(logging.INFO)

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        user_entity_type.ingest_from_gcs(
            feature_ids=[
                _TEST_USER_AGE_FEATURE_ID,
                _TEST_USER_GENDER_FEATURE_ID,
                _TEST_USER_LIKED_GENRES_FEATURE_ID,
            ],
            feature_time="update_time",
            gcs_source_uris=_TEST_USERS_ENTITY_TYPE_GCS_SRC,
            gcs_source_type="avro",
            entity_id_field="user_id",
            worker_count=1,
        )

        assert "EntityType feature values imported." in caplog.text

        caplog.clear()

    def test_batch_create_features(self, shared_state):
        """Create the three movie features in a single batch call, then verify."""
        assert shared_state["movie_entity_type"]
        movie_entity_type = shared_state["movie_entity_type"]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        movie_feature_configs = {
            _TEST_MOVIE_TITLE_FEATURE_ID: {"value_type": "STRING"},
            _TEST_MOVIE_GENRES_FEATURE_ID: {"value_type": "STRING_ARRAY"},
            _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID: {"value_type": "DOUBLE"},
        }

        movie_entity_type.batch_create_features(feature_configs=movie_feature_configs)

        get_movie_title_feature = movie_entity_type.get_feature(
            feature_id=_TEST_MOVIE_TITLE_FEATURE_ID
        )
        get_movie_genres_feature = movie_entity_type.get_feature(
            feature_id=_TEST_MOVIE_GENRES_FEATURE_ID
        )
        get_movie_avg_rating_feature = movie_entity_type.get_feature(
            feature_id=_TEST_MOVIE_AVERAGE_RATING_FEATURE_ID
        )

        list_movie_features = movie_entity_type.list_features()
        movie_feature_resource_names = [
            feature.resource_name for feature in list_movie_features
        ]

        assert get_movie_title_feature.resource_name in movie_feature_resource_names
        assert get_movie_genres_feature.resource_name in movie_feature_resource_names
        assert (
            get_movie_avg_rating_feature.resource_name in movie_feature_resource_names
        )

    def test_ingest_feature_values_from_df_using_feature_time_column_and_online_read_multiple_entities(
        self, shared_state, caplog
    ):
        """Ingest movie values from a DataFrame (per-row feature time), read two entities."""

        assert shared_state["movie_entity_type"]
        movie_entity_type = shared_state["movie_entity_type"]

        caplog.set_level(logging.INFO)

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        read_feature_ids = ["average_rating", "title", "genres"]

        # Before ingestion all feature values should read back as None.
        movie_entity_views_df_before_ingest = movie_entity_type.read(
            entity_ids=["movie_01", "movie_02"],
            feature_ids=read_feature_ids,
        )
        expected_data_before_ingest = [
            {
                "entity_id": "movie_01",
                "average_rating": None,
                "title": None,
                "genres": None,
            },
            {
                "entity_id": "movie_02",
                "average_rating": None,
                "title": None,
                "genres": None,
            },
        ]
        expected_movie_entity_views_df_before_ingest = pd.DataFrame(
            data=expected_data_before_ingest, columns=read_feature_ids
        )

        # NOTE(review): the result of .equals() is not asserted, so this
        # comparison is currently a no-op — likely missing an `assert`.
        movie_entity_views_df_before_ingest.equals(
            expected_movie_entity_views_df_before_ingest
        )

        movies_df = pd.DataFrame(
            data=[
                {
                    "movie_id": "movie_01",
                    "average_rating": 4.9,
                    "title": "The Shawshank Redemption",
                    "genres": ["Drama"],
                    "update_time": "2021-08-20 20:44:11.094375+00:00",
                },
                {
                    "movie_id": "movie_02",
                    "average_rating": 4.2,
                    "title": "The Shining",
                    "genres": ["Horror"],
                    "update_time": "2021-08-20 20:44:11.094375+00:00",
                },
            ],
            columns=["movie_id", "average_rating", "title", "genres", "update_time"],
        )
        movies_df["update_time"] = pd.to_datetime(movies_df["update_time"], utc=True)
        feature_time_column = "update_time"

        movie_entity_type.ingest_from_df(
            feature_ids=[
                _TEST_MOVIE_TITLE_FEATURE_ID,
                _TEST_MOVIE_GENRES_FEATURE_ID,
                _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
            ],
            feature_time=feature_time_column,
            df_source=movies_df,
            entity_id_field="movie_id",
        )

        movie_entity_views_df_after_ingest = movie_entity_type.read(
            entity_ids=["movie_01", "movie_02"],
            feature_ids=read_feature_ids,
        )
        expected_data_after_ingest = [
            {
                "movie_id": "movie_01",
                "average_rating": 4.9,
                "title": "The Shawshank Redemption",
                "genres": ["Drama"],
            },
            {
                "movie_id": "movie_02",
                "average_rating": 4.2,
                "title": "The Shining",
                "genres": ["Horror"],
            },
        ]
        expected_movie_entity_views_df_after_ingest = pd.DataFrame(
            data=expected_data_after_ingest, columns=read_feature_ids
        )

        # NOTE(review): .equals() result unused here as well — see above.
        movie_entity_views_df_after_ingest.equals(
            expected_movie_entity_views_df_after_ingest
        )

        assert "EntityType feature values imported." in caplog.text
        caplog.clear()

    def test_ingest_feature_values_from_df_using_feature_time_datetime_and_online_read_single_entity(
        self, shared_state, caplog
    ):
        """Ingest from a DataFrame with a single datetime feature time, read one entity."""
        assert shared_state["movie_entity_type"]
        movie_entity_type = shared_state["movie_entity_type"]

        caplog.set_level(logging.INFO)

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        movies_df = pd.DataFrame(
            data=[
                {
                    "movie_id": "movie_03",
                    "average_rating": 4.5,
                    "title": "Cinema Paradiso",
                    "genres": ["Romance"],
                },
                {
                    "movie_id": "movie_04",
                    "average_rating": 4.6,
                    "title": "The Dark Knight",
                    "genres": ["Action"],
                },
            ],
            columns=["movie_id", "average_rating", "title", "genres"],
        )

        # Round-trip through an ISO string to get a millisecond-precision,
        # naive datetime for the feature_time argument.
        feature_time_datetime_str = datetime.datetime.now().isoformat(
            sep=" ", timespec="milliseconds"
        )
        feature_time_datetime = datetime.datetime.strptime(
            feature_time_datetime_str, "%Y-%m-%d %H:%M:%S.%f"
        )

        movie_entity_type.ingest_from_df(
            feature_ids=[
                _TEST_MOVIE_TITLE_FEATURE_ID,
                _TEST_MOVIE_GENRES_FEATURE_ID,
                _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
            ],
            feature_time=feature_time_datetime,
            df_source=movies_df,
            entity_id_field="movie_id",
        )

        movie_entity_views_df_avg_rating = movie_entity_type.read(
            entity_ids="movie_04",
            feature_ids="average_rating",
        )
        expected_data_avg_rating = [
            {"movie_id": "movie_04", "average_rating": 4.6},
        ]
        expected_movie_entity_views_df_avg_rating = pd.DataFrame(
            data=expected_data_avg_rating, columns=["average_rating"]
        )

        # NOTE(review): .equals() result unused — likely missing an `assert`.
        movie_entity_views_df_avg_rating.equals(
            expected_movie_entity_views_df_avg_rating
        )

        assert "EntityType feature values imported." in caplog.text

        caplog.clear()

    def test_write_features(self, shared_state, caplog):
        """Write feature values online (preview + GA paths) and read them back."""
        assert shared_state["movie_entity_type"]
        movie_entity_type = shared_state["movie_entity_type"]

        caplog.set_level(logging.INFO)

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        # Create pandas DataFrame
        movies_df = pd.DataFrame(
            data=[
                {
                    "entity_id": "movie_01",
                    "average_rating": 4.9,
                    "title": "The Shawshank Redemption",
                    "genres": ["Drama", "Action"],
                },
                {
                    "entity_id": "movie_02",
                    "average_rating": 4.4,
                    "title": "The Shining",
                    "genres": ["Horror", "Action"],
                },
            ],
            columns=["entity_id", "average_rating", "title", "genres"],
        )
        movies_df = movies_df.set_index("entity_id")

        # Write feature values
        movie_entity_type.preview.write_feature_values(instances=movies_df)
        movie_entity_type.write_feature_values(
            instances={"movie_02": {"average_rating": 4.5}}
        )

        # Ensure writing feature values overwrites previous values
        movie_entity_df_avg_rating_genres = movie_entity_type.read(
            entity_ids="movie_02", feature_ids=["average_rating", "genres"]
        )
        expected_data_avg_rating = [
            {
                "entity_id": "movie_02",
                "average_rating": 4.5,
                "genres": ["Horror", "Action"],
            },
        ]
        expected_movie_entity_df_avg_rating_genres = pd.DataFrame(
            data=expected_data_avg_rating,
            columns=["entity_id", "average_rating", "genres"],
        )
        # NOTE(review): .equals() result unused — likely missing an `assert`.
        expected_movie_entity_df_avg_rating_genres.equals(
            movie_entity_df_avg_rating_genres
        )

        assert "EntityType feature values written." in caplog.text

        caplog.clear()

    def test_search_features(self, shared_state):
        """Feature.search() returns at least one feature in the project."""

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        list_searched_features = aiplatform.Feature.search()
        assert len(list_searched_features) >= 1

    def test_batch_serve_to_df(self, shared_state, caplog):
        """Batch-serve user and movie features into an in-memory DataFrame."""

        assert shared_state["featurestore"]
        assert shared_state["user_age_feature_resource_name"]
        assert shared_state["user_gender_feature_resource_name"]
        assert shared_state["user_liked_genres_feature_resource_name"]

        featurestore = shared_state["featurestore"]

        user_age_feature_resource_name = shared_state["user_age_feature_resource_name"]
        user_gender_feature_resource_name = shared_state[
            "user_gender_feature_resource_name"
        ]
        user_liked_genres_feature_resource_name = shared_state[
            "user_liked_genres_feature_resource_name"
        ]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        caplog.set_level(logging.INFO)

        read_instances_df = pd.DataFrame(
            data=[
                ["alice", "movie_01", "2021-09-15T08:28:14Z"],
                ["bob", "movie_02", "2021-09-15T08:28:14Z"],
                ["dav", "movie_03", "2021-09-15T08:28:14Z"],
                ["eve", "movie_04", "2021-09-15T08:28:14Z"],
                ["alice", "movie_03", "2021-09-14T09:35:15Z"],
                ["bob", "movie_04", "2020-02-14T09:35:15Z"],
            ],
            columns=["users", "movies", "timestamp"],
        )
        read_instances_df["timestamp"] = pd.to_datetime(
            read_instances_df["timestamp"], utc=True
        )

        df = featurestore.batch_serve_to_df(
            serving_feature_ids={
                _TEST_USER_ENTITY_TYPE_ID: [
                    _TEST_USER_AGE_FEATURE_ID,
                    _TEST_USER_GENDER_FEATURE_ID,
                    _TEST_USER_LIKED_GENRES_FEATURE_ID,
                ],
                _TEST_MOVIE_ENTITY_TYPE_ID: [
                    _TEST_MOVIE_TITLE_FEATURE_ID,
                    _TEST_MOVIE_GENRES_FEATURE_ID,
                    _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
                ],
            },
            read_instances_df=read_instances_df,
            # Rename the user feature columns in the served output.
            feature_destination_fields={
                user_age_feature_resource_name: "user_age_dest",
                user_gender_feature_resource_name: "user_gender_dest",
                user_liked_genres_feature_resource_name: "user_liked_genres_dest",
            },
        )

        expected_df_columns = [
            "timestamp",
            "entity_type_users",
            "user_age_dest",
            "user_gender_dest",
            "user_liked_genres_dest",
            "entity_type_movies",
            "title",
            "genres",
            "average_rating",
        ]

        assert isinstance(df, pd.DataFrame)
        assert list(df.columns) == expected_df_columns
        # 6 read instances x 9 columns = 54 cells.
        assert df.size == 54
        assert "Featurestore feature values served." in caplog.text

        caplog.clear()

    def test_batch_serve_to_gcs(self, shared_state, caplog):
        """Batch-serve features to GCS in TFRecord format."""

        assert shared_state["featurestore"]
        assert shared_state["bucket"]
        assert shared_state["user_age_feature_resource_name"]
        assert shared_state["user_gender_feature_resource_name"]
        assert shared_state["user_liked_genres_feature_resource_name"]

        featurestore = shared_state["featurestore"]
        bucket_name = shared_state["staging_bucket_name"]
        user_age_feature_resource_name = shared_state["user_age_feature_resource_name"]
        user_gender_feature_resource_name = shared_state[
            "user_gender_feature_resource_name"
        ]
        user_liked_genres_feature_resource_name = shared_state[
            "user_liked_genres_feature_resource_name"
        ]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        caplog.set_level(logging.INFO)

        featurestore.batch_serve_to_gcs(
            serving_feature_ids={
                _TEST_USER_ENTITY_TYPE_ID: [
                    _TEST_USER_AGE_FEATURE_ID,
                    _TEST_USER_GENDER_FEATURE_ID,
                    _TEST_USER_LIKED_GENRES_FEATURE_ID,
                ],
                _TEST_MOVIE_ENTITY_TYPE_ID: [
                    _TEST_MOVIE_TITLE_FEATURE_ID,
                    _TEST_MOVIE_GENRES_FEATURE_ID,
                    _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
                ],
            },
            read_instances_uri=_TEST_READ_INSTANCE_SRC,
            feature_destination_fields={
                user_age_feature_resource_name: "user_age_dest",
                user_gender_feature_resource_name: "user_gender_dest",
                user_liked_genres_feature_resource_name: "user_liked_genres_dest",
            },
            gcs_destination_output_uri_prefix=f"gs://{bucket_name}/featurestore_test/tfrecord",
            gcs_destination_type="tfrecord",
        )
        assert "Featurestore feature values served." in caplog.text

        caplog.clear()

    def test_batch_serve_to_bq(self, shared_state, caplog):
        """Batch-serve features into a BigQuery table."""

        assert shared_state["featurestore"]
        assert shared_state["bigquery_dataset"]
        assert shared_state["user_age_feature_resource_name"]
        assert shared_state["user_gender_feature_resource_name"]
        assert shared_state["user_liked_genres_feature_resource_name"]

        featurestore = shared_state["featurestore"]
        bigquery_dataset_id = shared_state["bigquery_dataset_id"]
        user_age_feature_resource_name = shared_state["user_age_feature_resource_name"]
        user_gender_feature_resource_name = shared_state[
            "user_gender_feature_resource_name"
        ]
        user_liked_genres_feature_resource_name = shared_state[
            "user_liked_genres_feature_resource_name"
        ]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        caplog.set_level(logging.INFO)

        featurestore.batch_serve_to_bq(
            serving_feature_ids={
                _TEST_USER_ENTITY_TYPE_ID: [
                    _TEST_USER_AGE_FEATURE_ID,
                    _TEST_USER_GENDER_FEATURE_ID,
                    _TEST_USER_LIKED_GENRES_FEATURE_ID,
                ],
                _TEST_MOVIE_ENTITY_TYPE_ID: [
                    _TEST_MOVIE_TITLE_FEATURE_ID,
                    _TEST_MOVIE_GENRES_FEATURE_ID,
                    _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
                ],
            },
            read_instances_uri=_TEST_READ_INSTANCE_SRC,
            feature_destination_fields={
                user_age_feature_resource_name: "user_age_dest",
                user_gender_feature_resource_name: "user_gender_dest",
                user_liked_genres_feature_resource_name: "user_liked_genres_dest",
            },
            bq_destination_output_uri=f"bq://{bigquery_dataset_id}.test_table",
        )

        assert "Featurestore feature values served." in caplog.text
        caplog.clear()

    def test_online_reads(self, shared_state):
        """Online read returns DataFrames for single- and multi-entity requests."""
        assert shared_state["user_entity_type"]
        assert shared_state["movie_entity_type"]

        user_entity_type = shared_state["user_entity_type"]
        movie_entity_type = shared_state["movie_entity_type"]

        user_entity_views = user_entity_type.read(entity_ids="alice")
        assert isinstance(user_entity_views, pd.DataFrame)

        movie_entity_views = movie_entity_type.read(
            entity_ids=["movie_01", "movie_04"],
            feature_ids=[_TEST_MOVIE_TITLE_FEATURE_ID, _TEST_MOVIE_GENRES_FEATURE_ID],
        )
        assert isinstance(movie_entity_views, pd.DataFrame)

        movie_entity_views = movie_entity_type.read(
            entity_ids="movie_01",
            feature_ids=[_TEST_MOVIE_TITLE_FEATURE_ID, _TEST_MOVIE_GENRES_FEATURE_ID],
        )
        assert isinstance(movie_entity_views, pd.DataFrame)
testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_initializer.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2023 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+
18
+ import pytest
19
+
20
+ from google.auth import credentials as auth_credentials
21
+
22
+ from google.cloud import aiplatform
23
+ from google.cloud.aiplatform import initializer as aiplatform_initializer
24
+ from tests.system.aiplatform import e2e_base
25
+
26
+
27
class TestInitializer(e2e_base.TestEndToEnd):
    """Tests the _set_google_auth_default() functionality in initializer._Config."""

    _temp_prefix = "test_initializer_"

    def test_init_calls_set_google_auth_default(self):
        """init() merges settings: a later partial init() must not clobber earlier values."""
        aiplatform.init(project=e2e_base._PROJECT)

        # init() with only creds shouldn't overwrite the project
        creds = auth_credentials.AnonymousCredentials()
        aiplatform.init(credentials=creds)

        assert aiplatform.initializer.global_config.project == e2e_base._PROJECT
        assert aiplatform.initializer.global_config.credentials == creds

        # init() with only project shouldn't overwrite creds
        aiplatform.init(project=e2e_base._PROJECT)
        assert aiplatform.initializer.global_config.credentials == creds

    def test_init_rest_async_incorrect_credentials(self):
        """Passing sync credentials for async REST transport raises ValueError."""
        # Async REST credentials must be explicitly set using
        # _set_async_rest_credentials() for async REST transport.
        creds = auth_credentials.AnonymousCredentials()
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            api_transport="rest",
        )

        # System tests are run on Python 3.10 which has async deps.
        with pytest.raises(ValueError):
            # Expect a ValueError for passing in sync credentials.
            aiplatform_initializer._set_async_rest_credentials(credentials=creds)
testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_language_models.py ADDED
@@ -0,0 +1,559 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright 2023 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+
18
+ # pylint: disable=protected-access, g-multiple-import
19
+
20
+ import pytest
21
+
22
+ from google import auth
23
+ from google.cloud import aiplatform
24
+ from google.cloud.aiplatform.compat.types import (
25
+ job_state as gca_job_state,
26
+ )
27
+ from tests.system.aiplatform import e2e_base
28
+ from google.cloud.aiplatform.utils import gcs_utils
29
+ from vertexai import language_models
30
+ from vertexai.preview import (
31
+ language_models as preview_language_models,
32
+ )
33
+ from vertexai.preview.language_models import (
34
+ ChatModel,
35
+ CodeGenerationModel,
36
+ InputOutputTextPair,
37
+ TextGenerationModel,
38
+ TextGenerationResponse,
39
+ TextEmbeddingModel,
40
+ )
41
+
42
# GCS staging directory used by tests that launch tuning/training jobs.
STAGING_DIR_URI = "gs://ucaip-samples-us-central1/tmp/staging"
43
+
44
+
45
+ class TestLanguageModels(e2e_base.TestEndToEnd):
46
+ """System tests for language models."""
47
+
48
+ _temp_prefix = "temp_language_models_test_"
49
+
50
    @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
    def test_text_generation(self, api_transport):
        """text-bison predict with web-search grounding over both transports."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            api_transport=api_transport,
        )

        model = TextGenerationModel.from_pretrained("google/text-bison@001")
        grounding_source = language_models.GroundingSource.WebSearch()
        response = model.predict(
            "What is the best recipe for cupcakes? Recipe:",
            max_output_tokens=128,
            temperature=0.0,
            top_p=1.0,
            top_k=5,
            stop_sequences=["# %%"],
            grounding_source=grounding_source,
        )
        # Safety filters may block the prompt; either outcome is acceptable.
        assert response.text or response.is_blocked
70
+
71
    @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
    def test_text_generation_preview_count_tokens(self, api_transport):
        """count_tokens (preview API) reports token and billable-character counts."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            api_transport=api_transport,
        )

        model = preview_language_models.TextGenerationModel.from_pretrained(
            "google/text-bison@001"
        )

        response = model.count_tokens(["How are you doing?"])

        # Non-zero counts indicate the request was processed.
        assert response.total_tokens
        assert response.total_billable_characters
87
+
88
    @pytest.mark.asyncio
    async def test_text_generation_model_predict_async(self):
        """Async predict path mirrors the sync text-generation test."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

        model = TextGenerationModel.from_pretrained("google/text-bison@001")
        grounding_source = language_models.GroundingSource.WebSearch()
        response = await model.predict_async(
            "What is the best recipe for cupcakes? Recipe:",
            max_output_tokens=128,
            temperature=0.0,
            top_p=1.0,
            top_k=5,
            stop_sequences=["# %%"],
            grounding_source=grounding_source,
        )
        # Safety filters may block the prompt; either outcome is acceptable.
        assert response.text or response.is_blocked
107
+
108
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
109
+ def test_text_generation_streaming(self, api_transport):
110
+ aiplatform.init(
111
+ project=e2e_base._PROJECT,
112
+ location=e2e_base._LOCATION,
113
+ api_transport=api_transport,
114
+ )
115
+
116
+ model = TextGenerationModel.from_pretrained("google/text-bison@001")
117
+
118
+ for response in model.predict_streaming(
119
+ "What is the best recipe for cupcakes? Recipe:",
120
+ max_output_tokens=128,
121
+ temperature=0.0,
122
+ top_p=1.0,
123
+ top_k=5,
124
+ ):
125
+ assert response.text or response.is_blocked
126
+
127
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
128
+ def test_preview_text_generation_from_pretrained(self, api_transport):
129
+ aiplatform.init(
130
+ project=e2e_base._PROJECT,
131
+ location=e2e_base._LOCATION,
132
+ api_transport=api_transport,
133
+ )
134
+
135
+ model = preview_language_models.TextGenerationModel.from_pretrained(
136
+ "google/text-bison@001"
137
+ )
138
+
139
+ response = model.predict(
140
+ "What is the best recipe for cupcakes? Recipe:",
141
+ max_output_tokens=128,
142
+ temperature=0.0,
143
+ top_p=1.0,
144
+ top_k=5,
145
+ stop_sequences=["# %%"],
146
+ )
147
+ assert response.text or response.is_blocked
148
+
149
+ assert isinstance(model, preview_language_models.TextGenerationModel)
150
+
151
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
152
+ def test_chat_on_chat_model(self, api_transport):
153
+ aiplatform.init(
154
+ project=e2e_base._PROJECT,
155
+ location=e2e_base._LOCATION,
156
+ api_transport=api_transport,
157
+ )
158
+ chat_model = ChatModel.from_pretrained("google/chat-bison@001")
159
+ grounding_source = language_models.GroundingSource.WebSearch()
160
+ chat = chat_model.start_chat(
161
+ context="My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.",
162
+ examples=[
163
+ InputOutputTextPair(
164
+ input_text="Who do you work for?",
165
+ output_text="I work for Ned.",
166
+ ),
167
+ InputOutputTextPair(
168
+ input_text="What do I like?",
169
+ output_text="Ned likes watching movies.",
170
+ ),
171
+ ],
172
+ temperature=0.0,
173
+ stop_sequences=["# %%"],
174
+ )
175
+
176
+ message1 = "Are my favorite movies based on a book series?"
177
+ response1 = chat.send_message(
178
+ message1,
179
+ grounding_source=grounding_source,
180
+ )
181
+ assert response1.text
182
+ assert response1.grounding_metadata
183
+ assert len(chat.message_history) == 2
184
+ assert chat.message_history[0].author == chat.USER_AUTHOR
185
+ assert chat.message_history[0].content == message1
186
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
187
+
188
+ message2 = "When were these books published?"
189
+ response2 = chat.send_message(
190
+ message2, temperature=0.1, grounding_source=grounding_source
191
+ )
192
+ assert response2.text
193
+ assert response2.grounding_metadata
194
+ assert len(chat.message_history) == 4
195
+ assert chat.message_history[2].author == chat.USER_AUTHOR
196
+ assert chat.message_history[2].content == message2
197
+ assert chat.message_history[3].author == chat.MODEL_AUTHOR
198
+
199
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
200
+ def test_chat_model_preview_count_tokens(self, api_transport):
201
+ aiplatform.init(
202
+ project=e2e_base._PROJECT,
203
+ location=e2e_base._LOCATION,
204
+ api_transport=api_transport,
205
+ )
206
+
207
+ chat_model = ChatModel.from_pretrained("google/chat-bison@001")
208
+
209
+ chat = chat_model.start_chat()
210
+
211
+ chat.send_message("What should I do today?")
212
+
213
+ response_with_history = chat.count_tokens("Any ideas?")
214
+
215
+ response_without_history = chat_model.start_chat().count_tokens(
216
+ "What should I do today?"
217
+ )
218
+
219
+ assert (
220
+ response_with_history.total_tokens > response_without_history.total_tokens
221
+ )
222
+ assert (
223
+ response_with_history.total_billable_characters
224
+ > response_without_history.total_billable_characters
225
+ )
226
+
227
+ @pytest.mark.asyncio
228
+ async def test_chat_model_async(self):
229
+ aiplatform.init(
230
+ project=e2e_base._PROJECT,
231
+ location=e2e_base._LOCATION,
232
+ )
233
+
234
+ chat_model = ChatModel.from_pretrained("google/chat-bison@001")
235
+ grounding_source = language_models.GroundingSource.WebSearch()
236
+ chat = chat_model.start_chat(
237
+ context="My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.",
238
+ examples=[
239
+ InputOutputTextPair(
240
+ input_text="Who do you work for?",
241
+ output_text="I work for Ned.",
242
+ ),
243
+ InputOutputTextPair(
244
+ input_text="What do I like?",
245
+ output_text="Ned likes watching movies.",
246
+ ),
247
+ ],
248
+ temperature=0.0,
249
+ stop_sequences=["# %%"],
250
+ )
251
+
252
+ message1 = "Are my favorite movies based on a book series?"
253
+ response1 = await chat.send_message_async(
254
+ message1,
255
+ grounding_source=grounding_source,
256
+ )
257
+ assert response1.text
258
+ assert response1.grounding_metadata
259
+ assert len(chat.message_history) == 2
260
+ assert chat.message_history[0].author == chat.USER_AUTHOR
261
+ assert chat.message_history[0].content == message1
262
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
263
+
264
+ message2 = "When were these books published?"
265
+ response2 = await chat.send_message_async(
266
+ message2,
267
+ temperature=0.1,
268
+ grounding_source=grounding_source,
269
+ )
270
+ assert response2.text
271
+ assert response2.grounding_metadata
272
+ assert len(chat.message_history) == 4
273
+ assert chat.message_history[2].author == chat.USER_AUTHOR
274
+ assert chat.message_history[2].content == message2
275
+ assert chat.message_history[3].author == chat.MODEL_AUTHOR
276
+
277
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
278
+ def test_chat_model_send_message_streaming(self, api_transport):
279
+ aiplatform.init(
280
+ project=e2e_base._PROJECT,
281
+ location=e2e_base._LOCATION,
282
+ api_transport=api_transport,
283
+ )
284
+
285
+ chat_model = ChatModel.from_pretrained("google/chat-bison@001")
286
+ chat = chat_model.start_chat(
287
+ context="My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.",
288
+ examples=[
289
+ InputOutputTextPair(
290
+ input_text="Who do you work for?",
291
+ output_text="I work for Ned.",
292
+ ),
293
+ InputOutputTextPair(
294
+ input_text="What do I like?",
295
+ output_text="Ned likes watching movies.",
296
+ ),
297
+ ],
298
+ temperature=0.0,
299
+ )
300
+
301
+ message1 = "Are my favorite movies based on a book series?"
302
+ for response in chat.send_message_streaming(message1):
303
+ assert isinstance(response, TextGenerationResponse)
304
+ assert len(chat.message_history) == 2
305
+ assert chat.message_history[0].author == chat.USER_AUTHOR
306
+ assert chat.message_history[0].content == message1
307
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
308
+
309
+ message2 = "When were these books published?"
310
+ for response2 in chat.send_message_streaming(
311
+ message2,
312
+ temperature=0.1,
313
+ ):
314
+ assert isinstance(response2, TextGenerationResponse)
315
+ assert len(chat.message_history) == 4
316
+ assert chat.message_history[2].author == chat.USER_AUTHOR
317
+ assert chat.message_history[2].content == message2
318
+ assert chat.message_history[3].author == chat.MODEL_AUTHOR
319
+
320
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
321
+ def test_text_embedding(self, api_transport):
322
+ aiplatform.init(
323
+ project=e2e_base._PROJECT,
324
+ location=e2e_base._LOCATION,
325
+ api_transport=api_transport,
326
+ )
327
+
328
+ model = TextEmbeddingModel.from_pretrained("google/textembedding-gecko@001")
329
+ # One short text, one llong text (to check truncation)
330
+ texts = ["What is life?", "What is life?" * 1000]
331
+ embeddings = model.get_embeddings(texts)
332
+ assert len(embeddings) == 2
333
+ assert len(embeddings[0].values) == 768
334
+ assert embeddings[0].statistics.token_count > 0
335
+ assert not embeddings[0].statistics.truncated
336
+
337
+ assert len(embeddings[1].values) == 768
338
+ assert embeddings[1].statistics.token_count > 1000
339
+ assert embeddings[1].statistics.truncated
340
+
341
+ @pytest.mark.asyncio
342
+ async def test_text_embedding_async(self):
343
+ aiplatform.init(
344
+ project=e2e_base._PROJECT,
345
+ location=e2e_base._LOCATION,
346
+ )
347
+
348
+ model = TextEmbeddingModel.from_pretrained("google/textembedding-gecko@001")
349
+ # One short text, one llong text (to check truncation)
350
+ texts = ["What is life?", "What is life?" * 1000]
351
+ embeddings = await model.get_embeddings_async(texts)
352
+ assert len(embeddings) == 2
353
+ assert len(embeddings[0].values) == 768
354
+ assert embeddings[0].statistics.token_count > 0
355
+ assert not embeddings[0].statistics.truncated
356
+
357
+ assert len(embeddings[1].values) == 768
358
+ assert embeddings[1].statistics.token_count > 1000
359
+ assert embeddings[1].statistics.truncated
360
+
361
    # TODO(b/339907038): Re-enable test after timeout issue is fixed.
    @pytest.mark.skip(reason="Causes system tests timeout")
    @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
    def test_tuning(self, shared_state, api_transport):
        """Test tuning, listing and loading models."""
        # Explicit ADC credentials with the full cloud-platform scope; the
        # tuning pipeline below runs in a different region than the client.
        credentials, _ = auth.default(
            scopes=["https://www.googleapis.com/auth/cloud-platform"]
        )
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            credentials=credentials,
            api_transport=api_transport,
        )

        model = language_models.TextGenerationModel.from_pretrained("text-bison@001")

        # Imported lazily so pandas is only required when this (skipped) test runs.
        import pandas

        # Minimal dummy dataset: ten input/output pairs, just enough to drive
        # the tuning pipeline end to end with train_steps=1.
        training_data = pandas.DataFrame(
            data=[
                {"input_text": "Input 0", "output_text": "Output 0"},
                {"input_text": "Input 1", "output_text": "Output 1"},
                {"input_text": "Input 2", "output_text": "Output 2"},
                {"input_text": "Input 3", "output_text": "Output 3"},
                {"input_text": "Input 4", "output_text": "Output 4"},
                {"input_text": "Input 5", "output_text": "Output 5"},
                {"input_text": "Input 6", "output_text": "Output 6"},
                {"input_text": "Input 7", "output_text": "Output 7"},
                {"input_text": "Input 8", "output_text": "Output 8"},
                {"input_text": "Input 9", "output_text": "Output 9"},
            ]
        )

        # NOTE(review): "veretx" looks like a typo for "vertex" in this object
        # name; harmless here since the same URI is used for upload and eval.
        dataset_uri = (
            STAGING_DIR_URI + "/veretx_llm_tuning_training_data.text-bison.dummy.jsonl"
        )
        # The in-memory DataFrame drives training; the same rows are uploaded
        # to GCS to serve as the evaluation dataset passed below.
        gcs_utils._upload_pandas_df_to_gcs(
            df=training_data, upload_gcs_path=dataset_uri
        )

        tuning_job = model.tune_model(
            training_data=training_data,
            train_steps=1,
            tuning_job_location="europe-west4",
            tuned_model_location="us-central1",
            learning_rate_multiplier=2.0,
            tuning_evaluation_spec=preview_language_models.TuningEvaluationSpec(
                evaluation_data=dataset_uri,
                evaluation_interval=37,
                enable_early_stopping=True,
            ),
        )
        # Blocks until the tuning pipeline completes and the model is deployed.
        tuned_model1 = tuning_job.get_tuned_model()

        # According to the Pipelines design, external resources created by a pipeline
        # must not be modified or deleted. Otherwise caching will break next pipeline runs.
        shared_state.setdefault("resources", [])
        shared_state["resources"].append(tuned_model1._endpoint)
        shared_state["resources"].extend(
            aiplatform.Model(model_name=deployed_model.model)
            for deployed_model in tuned_model1._endpoint.list_models()
        )
        # Deleting the Endpoint is a little less bad since the LLM SDK will recreate it, but it's not advised for the same reason.

        # Testing the new model returned by the `tuning_job.get_tuned_model` method
        response1 = tuned_model1.predict(
            "What is the best recipe for cupcakes? Recipe:",
            max_output_tokens=128,
            temperature=0.0,
            top_p=1.0,
            top_k=5,
        )
        assert response1.text or response1.is_blocked

        # Testing listing and getting tuned models
        tuned_model_names = model.list_tuned_model_names()
        assert tuned_model_names
        tuned_model_name = tuned_model_names[0]

        tuned_model = TextGenerationModel.get_tuned_model(tuned_model_name)

        tuned_model_response = tuned_model.predict(
            "What is the best recipe for cupcakes? Recipe:",
            max_output_tokens=128,
            temperature=0.0,
            top_p=1.0,
            top_k=5,
        )
        assert tuned_model_response.text or tuned_model_response.is_blocked
451
+
452
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
453
+ def test_batch_prediction_for_text_generation(self, api_transport):
454
+ source_uri = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/batch_prediction_prompts1.jsonl"
455
+ destination_uri_prefix = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/predictions/text-bison@001_"
456
+
457
+ aiplatform.init(
458
+ project=e2e_base._PROJECT,
459
+ location=e2e_base._LOCATION,
460
+ api_transport=api_transport,
461
+ )
462
+
463
+ model = TextGenerationModel.from_pretrained("text-bison@001")
464
+ job = model.batch_predict(
465
+ dataset=source_uri,
466
+ destination_uri_prefix=destination_uri_prefix,
467
+ model_parameters={"temperature": 0, "top_p": 1, "top_k": 5},
468
+ )
469
+
470
+ job.wait_for_resource_creation()
471
+ job.wait()
472
+ gapic_job = job._gca_resource
473
+ job.delete()
474
+
475
+ assert gapic_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
476
+
477
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
478
+ def test_batch_prediction_for_textembedding(self, api_transport):
479
+ source_uri = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/batch_prediction_prompts_textembedding_dummy1.jsonl"
480
+ destination_uri_prefix = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/predictions/textembedding-gecko@001_"
481
+
482
+ aiplatform.init(
483
+ project=e2e_base._PROJECT,
484
+ location=e2e_base._LOCATION,
485
+ api_transport=api_transport,
486
+ )
487
+
488
+ model = TextEmbeddingModel.from_pretrained("textembedding-gecko@001")
489
+ job = model.batch_predict(
490
+ dataset=source_uri,
491
+ destination_uri_prefix=destination_uri_prefix,
492
+ model_parameters={},
493
+ )
494
+
495
+ job.wait_for_resource_creation()
496
+ job.wait()
497
+ gapic_job = job._gca_resource
498
+ job.delete()
499
+
500
+ assert gapic_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
501
+
502
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
503
+ def test_batch_prediction_for_code_generation(self, api_transport):
504
+ source_uri = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/code-bison.batch_prediction_prompts.1.jsonl"
505
+ destination_uri_prefix = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/predictions/code-bison@001_"
506
+
507
+ aiplatform.init(
508
+ project=e2e_base._PROJECT,
509
+ location=e2e_base._LOCATION,
510
+ api_transport=api_transport,
511
+ )
512
+
513
+ model = CodeGenerationModel.from_pretrained("code-bison@001")
514
+ job = model.batch_predict(
515
+ dataset=source_uri,
516
+ destination_uri_prefix=destination_uri_prefix,
517
+ model_parameters={"temperature": 0},
518
+ )
519
+
520
+ job.wait_for_resource_creation()
521
+ job.wait()
522
+ gapic_job = job._gca_resource
523
+ job.delete()
524
+
525
+ assert gapic_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
526
+
527
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
528
+ def test_code_generation_streaming(self, api_transport):
529
+ aiplatform.init(
530
+ project=e2e_base._PROJECT,
531
+ location=e2e_base._LOCATION,
532
+ api_transport=api_transport,
533
+ )
534
+
535
+ model = language_models.CodeGenerationModel.from_pretrained("code-bison@001")
536
+
537
+ for response in model.predict_streaming(
538
+ prefix="def reverse_string(s):",
539
+ # code-bison does not support suffix
540
+ # suffix=" return s",
541
+ max_output_tokens=128,
542
+ temperature=0.0,
543
+ ):
544
+ assert response.text
545
+
546
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
547
+ def test_code_chat_model_send_message_streaming(self, api_transport):
548
+ aiplatform.init(
549
+ project=e2e_base._PROJECT,
550
+ location=e2e_base._LOCATION,
551
+ api_transport=api_transport,
552
+ )
553
+
554
+ chat_model = language_models.CodeChatModel.from_pretrained("codechat-bison@001")
555
+ chat = chat_model.start_chat()
556
+
557
+ message1 = "Please help write a function to calculate the max of two numbers"
558
+ for response in chat.send_message_streaming(message1):
559
+ assert response.text