text
stringlengths 2
999k
|
|---|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that uses the GeoJSONs (from DC KG) to map lat/lng to DC places.
See latlng_recon_geojson_test.py for usage example.
"""
import datacommons as dc
import json
from shapely import geometry
# Root-of-hierarchy DCIDs used when fetching child places from the DC KG.
_WORLD = 'Earth'
_USA = 'country/USA'

# Maps a DC place type to the geoJson property fetched for that type.
_GJ_PROP = {
    # DP2 is a lower-resolution (simplified) geojson variant.
    'Country': 'geoJsonCoordinatesDP2',
    # Certain low-res geojsons are malformed for states
    'State': 'geoJsonCoordinates',
}
def _get_geojsons(place_type, parent_place):
    """Fetch parsed GeoJSON shapes for all `place_type` places under `parent_place`.

    Returns a dict mapping place DCID -> shapely geometry. Places whose
    geojson property is missing/empty are omitted.
    """
    child_places = dc.get_places_in([parent_place], place_type)[parent_place]
    prop_values = dc.get_property_values(child_places, _GJ_PROP[place_type])
    # Only the first property value is used; empty value lists are skipped.
    return {
        dcid: geometry.shape(json.loads(values[0]))
        for dcid, values in prop_values.items()
        if values
    }
def _get_continent_map(countries):
    # Maps each country DCID to its 'containedInPlace' parents (continents).
    # NOTE(review): containedInPlace may include non-continent parents too —
    # confirm against the KG if that matters to callers.
    return dc.get_property_values(countries, 'containedInPlace')
class LatLng2Places:
    """Helper class to map lat/lng to DC places using GeoJSON files.

    Right now it only supports: Country, Continent and US States.
    """

    def __init__(self):
        # Loading geojsons issues DataCommons API calls; this is slow.
        self._country_geojsons = _get_geojsons('Country', _WORLD)
        self._us_state_geojsons = _get_geojsons('State', _USA)
        self._continent_map = _get_continent_map(list(self._country_geojsons))
        num_shapes = len(self._country_geojsons) + len(self._us_state_geojsons)
        print('Loaded', num_shapes, 'geojsons!')

    def resolve(self, lat, lon):
        """Given a lat/long returns a list of place DCIDs that contain it."""
        # Shapely points are (x, y) == (longitude, latitude).
        point = geometry.Point(lon, lat)
        country = next(
            (dcid for dcid, shape in self._country_geojsons.items()
             if shape.contains(point)),
            None)
        places = []
        # State resolution is only attempted for the USA.
        if country == _USA:
            state = next(
                (dcid for dcid, shape in self._us_state_geojsons.items()
                 if shape.contains(point)),
                None)
            if state is not None:
                places.append(state)
        if country:
            places.append(country)
            places.extend(self._continent_map[country])
        return places
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for rouge scorer.
Tests for both correctness and for consistency with the official ROUGE-1.5.5
implementation.
"Ground truth" scores are taken from manual runs of ROUGE-1.5.5.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import absltest
from absl.testing import parameterized
from rouge import rouge_scorer
from rouge import test_util
class RougeScorerTest(parameterized.TestCase):
  """Tests RougeScorer against hand-computed and ROUGE-1.5.5 ground truth."""

  def setUp(self):
    super(RougeScorerTest, self).setUp()
    # Line-aligned fixtures: targets[i] pairs with predictions[i].
    with open(test_util.TARGETS_FILE) as f:
      self.targets = f.readlines()
    with open(test_util.PREDICTIONS_FILE) as f:
      self.predictions = f.readlines()

  @parameterized.parameters(["rougen", "rouge0", "rouge10"])
  def testInvalidRougeTypes(self, rouge_type):
    # Malformed rouge-type names must be rejected with ValueError.
    with self.assertRaises(ValueError):
      scorer = rouge_scorer.RougeScorer([rouge_type])
      scorer.score("testing one two", "testing")

  @parameterized.parameters(["rouge1", "rouge9", "rougeL", "rougeLsum"])
  def testValidRougeTypes(self, rouge_type):
    # Result dict is keyed exactly by the requested rouge types.
    scorer = rouge_scorer.RougeScorer([rouge_type])
    result = scorer.score("testing one two", "testing")
    self.assertSameElements(list(result.keys()), [rouge_type])

  def testRouge1(self):
    # Unigram overlap: 1/1 prediction tokens, 1/3 target tokens.
    scorer = rouge_scorer.RougeScorer(["rouge1"])
    result = scorer.score("testing one two", "testing")
    self.assertAlmostEqual(1, result["rouge1"].precision)
    self.assertAlmostEqual(1 / 3, result["rouge1"].recall)
    self.assertAlmostEqual(1 / 2, result["rouge1"].fmeasure)

  @parameterized.parameters(["rouge1", "rouge2", "rougeL", "rougeLsum"])
  def testRougeEmpty(self, rouge_type):
    # An empty prediction scores zero on every metric.
    scorer = rouge_scorer.RougeScorer([rouge_type])
    result = scorer.score("testing one two", "")
    self.assertAlmostEqual(0, result[rouge_type].precision)
    self.assertAlmostEqual(0, result[rouge_type].recall)
    self.assertAlmostEqual(0, result[rouge_type].fmeasure)

  def testRouge2(self):
    # Bigram overlap: 1/1 prediction bigrams, 1/2 target bigrams.
    scorer = rouge_scorer.RougeScorer(["rouge2"])
    result = scorer.score("testing one two", "testing one")
    self.assertAlmostEqual(1, result["rouge2"].precision)
    self.assertAlmostEqual(1 / 2, result["rouge2"].recall)
    self.assertAlmostEqual(2 / 3, result["rouge2"].fmeasure)

  def testRougeLConsecutive(self):
    scorer = rouge_scorer.RougeScorer(["rougeL"])
    result = scorer.score("testing one two", "testing one")
    self.assertAlmostEqual(1, result["rougeL"].precision)
    self.assertAlmostEqual(2 / 3, result["rougeL"].recall)
    self.assertAlmostEqual(4 / 5, result["rougeL"].fmeasure)

  def testRougeLNonConsecutive(self):
    # LCS does not require the common tokens to be adjacent.
    scorer = rouge_scorer.RougeScorer(["rougeL"])
    result = scorer.score("testing one two", "testing two")
    self.assertAlmostEqual(1, result["rougeL"].precision)
    self.assertAlmostEqual(2 / 3, result["rougeL"].recall)
    self.assertAlmostEqual(4 / 5, result["rougeL"].fmeasure)

  def testMultipleRougeTypes(self):
    # Each requested type is scored independently in one call.
    scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"])
    result = scorer.score("testing one two", "testing one")
    self.assertSameElements(list(result.keys()), ["rouge1", "rougeL"])
    self.assertAlmostEqual(1, result["rouge1"].precision)
    self.assertAlmostEqual(2 / 3, result["rouge1"].recall)
    self.assertAlmostEqual(4 / 5, result["rouge1"].fmeasure)
    self.assertAlmostEqual(1, result["rougeL"].precision)
    self.assertAlmostEqual(2 / 3, result["rougeL"].recall)
    self.assertAlmostEqual(4 / 5, result["rougeL"].fmeasure)

  def testRouge1AgainstRouge155(self):
    # Expected values are from manual runs of the official ROUGE-1.5.5 script.
    scorer = rouge_scorer.RougeScorer(["rouge1"])
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.40741, result["rouge1"].recall, 5)
    self.assertAlmostEqual(0.68750, result["rouge1"].precision, 5)
    self.assertAlmostEqual(0.51163, result["rouge1"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.40476, result["rouge1"].recall, 5)
    self.assertAlmostEqual(0.65385, result["rouge1"].precision, 5)
    self.assertAlmostEqual(0.50000, result["rouge1"].fmeasure, 5)

  def testRouge1AgainstRouge155WithStemming(self):
    # Stemming only changes the second example pair's scores.
    scorer = rouge_scorer.RougeScorer(["rouge1"], use_stemmer=True)
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.40741, result["rouge1"].recall, 5)
    self.assertAlmostEqual(0.68750, result["rouge1"].precision, 5)
    self.assertAlmostEqual(0.51163, result["rouge1"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.42857, result["rouge1"].recall, 5)
    self.assertAlmostEqual(0.69231, result["rouge1"].precision, 5)
    self.assertAlmostEqual(0.52941, result["rouge1"].fmeasure, 5)

  def testRouge2AgainstRouge155(self):
    scorer = rouge_scorer.RougeScorer(["rouge2"])
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.30769, result["rouge2"].recall, 5)
    self.assertAlmostEqual(0.53333, result["rouge2"].precision, 5)
    self.assertAlmostEqual(0.39024, result["rouge2"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.29268, result["rouge2"].recall, 5)
    self.assertAlmostEqual(0.48000, result["rouge2"].precision, 5)
    self.assertAlmostEqual(0.36364, result["rouge2"].fmeasure, 5)

  def testRouge2AgainstRouge155WithStemming(self):
    scorer = rouge_scorer.RougeScorer(["rouge2"], use_stemmer=True)
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.30769, result["rouge2"].recall, 5)
    self.assertAlmostEqual(0.53333, result["rouge2"].precision, 5)
    self.assertAlmostEqual(0.39024, result["rouge2"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.29268, result["rouge2"].recall, 5)
    self.assertAlmostEqual(0.48000, result["rouge2"].precision, 5)
    self.assertAlmostEqual(0.36364, result["rouge2"].fmeasure, 5)

  def testRougeLAgainstRouge155(self):
    scorer = rouge_scorer.RougeScorer(["rougeL"])
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.40741, result["rougeL"].recall, 5)
    self.assertAlmostEqual(0.68750, result["rougeL"].precision, 5)
    self.assertAlmostEqual(0.51163, result["rougeL"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.40476, result["rougeL"].recall, 5)
    self.assertAlmostEqual(0.65385, result["rougeL"].precision, 5)
    self.assertAlmostEqual(0.50000, result["rougeL"].fmeasure, 5)

  def testRougeLSumAgainstRouge155WithStemming(self):
    # Multi-sentence fixtures exercise the summary-level LCS path.
    scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
    target = test_util.get_text(
        os.path.join(test_util.PYROUGE_DIR, "target_multi.0.txt"))
    prediction = test_util.get_text(
        os.path.join(test_util.PYROUGE_DIR, "prediction_multi.0.txt"))
    result = scorer.score(target, prediction)
    self.assertAlmostEqual(0.36538, result["rougeLsum"].recall, places=5)
    self.assertAlmostEqual(0.66667, result["rougeLsum"].precision, places=5)
    self.assertAlmostEqual(0.47205, result["rougeLsum"].fmeasure, places=5)

  def testRougeLSumSentenceSplitting(self):
    scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
    target = "First sentence.\nSecond Sentence."
    prediction = "Second sentence.\nFirst Sentence."
    result = scorer.score(target, prediction)
    self.assertAlmostEqual(1.0, result["rougeLsum"].fmeasure, places=5)
    scorer = rouge_scorer.RougeScorer(["rougeLsum"],
                                      use_stemmer=True,
                                      split_summaries=False)
    # NOTE(review): this score is computed but never asserted — `result` is
    # overwritten below. Presumably an assertion was intended here; confirm.
    result = scorer.score(target, prediction)
    # Without newlines, summaries are treated as single sentences.
    target = target.replace("\n", " ")
    prediction = prediction.replace("\n", " ")
    result = scorer.score(target, prediction)
    self.assertAlmostEqual(0.50, result["rougeLsum"].fmeasure, places=5)
    # Split summaries into sentences using nltk
    scorer = rouge_scorer.RougeScorer(["rougeLsum"],
                                      use_stemmer=True,
                                      split_summaries=True)
    result = scorer.score(target, prediction)
    self.assertAlmostEqual(1.0, result["rougeLsum"].fmeasure, places=5)

  def testLcsTable(self):
    # The DP table's bottom-right cell holds the LCS length.
    ref = [1, 2, 3, 4, 5]
    c1 = [2, 5, 3, 4]
    t = rouge_scorer._lcs_table(ref, c1)
    self.assertEqual(3, t[len(ref)][len(c1)])

    def _read_lcs(t, ref, can):
      # Backtracks the DP table to recover LCS element indices into `ref`.
      return rouge_scorer._backtrack_norec(t, ref, can)

    # Indices
    self.assertEqual([1, 2, 3],
                     _read_lcs(t, ref, c1))
    # Values
    self.assertEqual([2, 3, 4],
                     [ref[i] for i in _read_lcs(t, ref, c1)])
    # No common subsequence.
    c2 = [8, 9]
    t = rouge_scorer._lcs_table(ref, c2)
    self.assertEqual(0, t[len(ref)][len(c2)])
    self.assertEqual([],
                     _read_lcs(t, ref, c2))

  def testUnionLcs(self):
    # Example in Section 3.2 of https://www.aclweb.org/anthology/W04-1013,
    # except using indices into ref.
    # First test helper.
    lcs1 = [0, 1]  # lcs [1, 2]
    lcs2 = [0, 2, 4]
    # Union is order-independent.
    self.assertEqual([0, 1, 2, 4], rouge_scorer._find_union([lcs1, lcs2]))
    self.assertEqual([0, 1, 2, 4], rouge_scorer._find_union([lcs2, lcs1]))
    ref = [1, 2, 3, 4, 5]
    c1 = [1, 2, 6, 7, 8]  # lcs = [1, 2]
    c2 = [1, 3, 8, 9, 5]  # lcs = [1, 3, 5]
    self.assertEqual([1, 2, 3, 5],
                     rouge_scorer._union_lcs(ref, [c1, c2]))
    self.assertEqual([1, 2, 3, 5],
                     rouge_scorer._union_lcs(ref, [c1, c2]))

  def testSummaryLevelLcs(self):
    refs = [
        [1, 2, 3, 4, 5]
    ]
    cans = [
        [1, 2, 6, 7, 8],  # lcs = [1, 2]
        [1, 3, 8, 9, 5]  # lcs = [1, 3, 5]
    ]
    score = rouge_scorer._summary_level_lcs(refs, cans)
    self.assertEqual(0.8, score.recall)  # 4 / 5
    self.assertEqual(0.4, score.precision)  # 4 / 10
    # 0.4*0.8 / (0.4 + 0.8)
    self.assertAlmostEqual(0.5333, score.fmeasure, places=3)
    # Tokenizer may drop all tokens, resulting in empty candidate list.
    score = rouge_scorer._summary_level_lcs([["reference"]], [[]])
    self.assertEqual(0.0, score.recall)

  def testRougeLsum(self):
    scorer = rouge_scorer.RougeScorer(["rougeLsum"])
    result = scorer.score("w1 w2 w3 w4 w5", "w1 w2 w6 w7 w8\nw1 w3 w8 w9 w5")
    self.assertAlmostEqual(0.8, result["rougeLsum"].recall)
    self.assertAlmostEqual(0.4, result["rougeLsum"].precision)
    self.assertAlmostEqual(0.5333, result["rougeLsum"].fmeasure, places=3)
    # Empty case
    result = scorer.score("w1 w2 w3 w4 w5", "")
    self.assertAlmostEqual(0.0, result["rougeLsum"].fmeasure, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].recall, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].precision, places=3)
    result = scorer.score("", "w1")
    self.assertAlmostEqual(0.0, result["rougeLsum"].fmeasure, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].recall, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].precision, places=3)
    # Case in which summary is all non-word characters.
    result = scorer.score("w1 w2 w3 w4 w5", "/")
    self.assertAlmostEqual(0.0, result["rougeLsum"].fmeasure, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].recall, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].precision, places=3)

  def testRougeLsumLarge(self):
    # Smoke test on large real-world documents.
    with open(test_util.LARGE_PREDICTIONS_FILE) as f:
      prediction = f.read()
    with open(test_util.LARGE_TARGETS_FILE) as f:
      target = f.read()
    scorer = rouge_scorer.RougeScorer(["rougeLsum"])
    result = scorer.score(target, prediction)
    self.assertAlmostEqual(0.533, result["rougeLsum"].fmeasure, places=3)
# Allow running this test module directly as a script.
if __name__ == "__main__":
  absltest.main()
|
"""Constants and configs."""
import os
import pathlib
import dotenv
import hikari
# Populate os.environ from a local .env file before the lookups below.
dotenv.load_dotenv()

# The reason we need this is that DNS lookup fails with default settings,
# so we need to set the DNS servers manually;
# to stop one DNS server from ruining our day, we use more than one.
# SOLUTION FROM:
# https://forum.omz-software.com/topic/6751/pymongo-errors-configurationerror-resolver-configuration-could-not-be-read-or-specified-no-nameservers/5
DNS_SERVERS = [
    # Google
    "8.8.8.8",
    "8.8.4.4",
    # Cloudflare
    "1.1.1.1",
]
def load_required(key: str) -> str:
    """
    Load value from env, fails if not found.

    Args:
        key: key to lookup

    Returns:
        the found value

    Raises:
        EnvironmentError: key not found
    """
    # os.getenv already defaults to None; no explicit default needed.
    value = os.getenv(key)
    if value is None:
        # Fixed typos in the message ("envioroment varible").
        # EnvironmentError (an alias of OSError) is kept so existing
        # callers catching it still work.
        raise EnvironmentError(f"Missing environment variable {key!r}")
    return value
# Required configuration; importing this module fails fast (EnvironmentError)
# if any of these is missing from the environment.
TOKEN = load_required("DISCORD_TOKEN")
DATABASE_URI = load_required("DATABASE_URI")
DATABASE_NAME = load_required("DATABASE_NAME")

# Testing unhides all messages.
# WARNING: DO NOT ENABLE IN PROD
# Env value is parsed via int(), so it must be "0"/"1" when set.
TESTING = bool(int(os.getenv("TESTING", False)))
HIDE_MESSAGES = not TESTING

# Default to real server
# NOTE(review): the fallbacks below look like hard-coded production Discord
# IDs — confirm before reusing this config in another deployment.
BOT_OWNER_ID = int(os.getenv("BOT_OWNER_ID", 366331361583169537))
ADMIN_ROLE_ID = int(os.getenv("ADMIN_ROLE_ID", 797573934848802817))
GUILD_ID = int(os.getenv("GUILD_ID", 797571990176661504))
LOG_CHANNEL_ID = int(os.getenv("LOG_CHANNEL_ID", 876494154354528316))
BIRTHDAY_CHANNEL_ID = int(os.getenv("BIRTHDAY_CHANNEL_ID", 801157827145760768))
class Paths:
    """Folder paths."""

    # Root of the bot package, relative to the process working directory.
    src = pathlib.Path("bot")
    modules = src / "modules"
    resources = src / "resources"
class Colors:
    """Default colors."""

    # Hex RGB values wrapped in hikari.Color, e.g. for embed styling.
    RED = hikari.Color(0xFF0000)
    GREEN = hikari.Color(0x07E500)
    BLUE = hikari.Color(0x0044F2)
    YELLOW = hikari.Color(0xF7EB02)
|
"""Init module."""
from vizier._src.jax import xla_pareto
from vizier._src.jax.xla_pareto import JaxParetoOptimalAlgorithm
|
# -*- coding: utf-8 -*-
"""
github3.api
===========
:copyright: (c) 2012-2014 by Ian Cordasco
:license: Modified BSD, see LICENSE for more details
"""
import warnings
from functools import wraps
from .github import GitHub, GitHubEnterprise
# Module-level anonymous (unauthenticated) client shared by the deprecated
# top-level API functions below.
gh = GitHub()
def deprecated(func):
    """Decorator to mark a function as deprecated."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Point the warning at the caller's line via stacklevel=2.
        message = (
            "The anonymous API function `github3.api.{0}` is deprecated. Use "
            "`GitHub.{0}` instead.".format(func.__name__)
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)

    return wrapper
@deprecated
def authorize(
    username,
    password,
    scopes,
    note="",
    note_url="",
    client_id="",
    client_secret="",
    two_factor_callback=None,
    github=None,
):
    """Obtain an authorization token for the GitHub API.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.authorize` instead.

    :param str username: (required)
    :param str password: (required)
    :param list scopes: (required), areas you want this token to apply to,
        i.e., 'gist', 'user'
    :param str note: (optional), note about the authorization
    :param str note_url: (optional), url for the application
    :param str client_id: (optional), 20 character OAuth client key for which
        to create a token
    :param str client_secret: (optional), 40 character OAuth client secret for
        which to create the token
    :param func two_factor_callback: (optional), function to call when a
        Two-Factor Authentication code needs to be provided by the user.
    :param GitHub github: (optional), GitHub (or GitHubEnterprise) object for
        login.
    :returns: :class:`Authorization <Authorization>`
    """
    # Use the caller-supplied session when given; otherwise a fresh one.
    client = github or GitHub()
    client.login(two_factor_callback=two_factor_callback)
    return client.authorize(
        username,
        password,
        scopes,
        note,
        note_url,
        client_id,
        client_secret,
    )
def login(username=None, password=None, token=None, two_factor_callback=None):
    """Construct and return an authenticated GitHub session.

    .. note::

        To allow you to specify either a username and password combination or
        a token, none of the parameters are required. If you provide none of
        them, you will receive ``None``.

    :param str username: login name
    :param str password: password for the login
    :param str token: OAuth token
    :param func two_factor_callback: (optional), function you implement to
        provide the Two-factor Authentication code to GitHub when necessary
    :returns: :class:`GitHub <github3.github.GitHub>`
    """
    # Guard clause: without a full credential pair or a token, do nothing.
    if not ((username and password) or token):
        return None
    session = GitHub()
    session.login(username, password, token, two_factor_callback)
    return session
def enterprise_login(
    username=None,
    password=None,
    token=None,
    url=None,
    two_factor_callback=None,
):
    """Construct and return an authenticated GitHubEnterprise session.

    .. note::

        To allow you to specify either a username and password combination or
        a token, none of the parameters are required. If you provide none of
        them, you will receive ``None``.

    :param str username: login name
    :param str password: password for the login
    :param str token: OAuth token
    :param str url: URL of a GitHub Enterprise instance
    :param func two_factor_callback: (optional), function you implement to
        provide the Two-factor Authentication code to GitHub when necessary
    :returns: :class:`GitHubEnterprise <github3.github.GitHubEnterprise>`
    """
    # The Enterprise URL is mandatory — there is no public default host.
    if not url:
        raise ValueError(
            "GitHub Enterprise requires you provide the URL of"
            " the instance"
        )
    # Same credential rule as login(): pair of username/password, or token.
    if not ((username and password) or token):
        return None
    session = GitHubEnterprise(url)
    session.login(username, password, token, two_factor_callback)
    return session
@deprecated
def emojis():
    # Thin anonymous wrapper; its docstring is copied from the bound
    # method right below.
    return gh.emojis()


emojis.__doc__ = gh.emojis.__doc__


@deprecated
def gist(id_num):
    """Retrieve the gist identified by ``id_num``.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.gist` instead.

    :param int id_num: (required), unique id of the gist
    :returns: :class:`Gist <github3.gists.Gist>`
    """
    return gh.gist(id_num)


@deprecated
def gitignore_template(language):
    """Return the template for language.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.gitignore_template` instead.

    :returns: str
    """
    return gh.gitignore_template(language)


@deprecated
def gitignore_templates():
    """Return the list of available templates.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.gitignore_templates` instead.

    :returns: list of template names
    """
    return gh.gitignore_templates()
@deprecated
def all_repositories(number=-1, etag=None):
    """Iterate over every repository in the order they were created.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.all_repositories` instead.

    :param int number: (optional), number of repositories to return.
        Default: -1, returns all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Repository <github3.repos.Repository>`
    """
    return gh.all_repositories(number, etag)


@deprecated
def all_users(number=-1, etag=None):
    """Iterate over every user in the order they signed up for GitHub.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.all_users` instead.

    :param int number: (optional), number of users to return. Default: -1,
        returns all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`User <github3.users.User>`
    """
    return gh.all_users(number, etag)


@deprecated
def all_events(number=-1, etag=None):
    """Iterate over public events.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.all_events` instead.

    :param int number: (optional), number of events to return. Default: -1
        returns all available events
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Event <github3.events.Event>`
    """
    return gh.all_events(number, etag)
@deprecated
def followers_of(username, number=-1, etag=None):
    """List the followers of ``username``.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.followers_of` instead.

    :param str username: (required), login of the person to list the followers
        of
    :param int number: (optional), number of followers to return, Default: -1,
        return all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`User <github3.users.User>`
    """
    # A falsy username short-circuits to an empty result without an API call.
    if not username:
        return []
    return gh.followers_of(username, number, etag)
@deprecated
def followed_by(username, number=-1, etag=None):
    """List the people ``username`` follows.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.followed_by` instead.

    :param str username: (required), login of the user
    :param int number: (optional), number of users being followed by username
        to return. Default: -1, return all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`User <github3.users.User>`
    """
    # A falsy username yields an empty list without hitting the API.
    return gh.followed_by(username, number, etag) if username else []


@deprecated
def public_gists(number=-1, etag=None):
    """Iterate over all public gists.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.public_gists` instead.

    .. versionadded:: 1.0

        This was split from ``github3.iter_gists`` before 1.0.

    :param int number: (optional), number of gists to return. Default: -1,
        return all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Gist <github3.gists.Gist>`
    """
    return gh.public_gists(number, etag)


@deprecated
def gists_by(username, number=-1, etag=None):
    """Iterate over gists created by the provided username.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.gists_by` instead.

    :param str username: (required), if provided, get the gists for this user
        instead of the authenticated user.
    :param int number: (optional), number of gists to return. Default: -1,
        return all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Gist <github3.gists.Gist>`
    """
    if username:
        return gh.gists_by(username, number, etag)
    # Empty iterator keeps the return type generator-like for falsy usernames.
    return iter([])
@deprecated
def issues_on(
    owner,
    repository,
    milestone=None,
    state=None,
    assignee=None,
    mentioned=None,
    labels=None,
    sort=None,
    direction=None,
    since=None,
    number=-1,
    etag=None,
):
    r"""Iterate over issues on owner/repository.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.issues_on` instead.

    .. versionchanged:: 0.9.0

        - The ``state`` parameter now accepts 'all' in addition to 'open'
          and 'closed'.

    :param str owner: login of the owner of the repository
    :param str repository: name of the repository
    :param int milestone: None, '*', or ID of milestone
    :param str state: accepted values: ('all', 'open', 'closed')
        api-default: 'open'
    :param str assignee: '*' or login of the user
    :param str mentioned: login of the user
    :param str labels: comma-separated list of label names, e.g.,
        'bug,ui,@high'
    :param str sort: accepted values: ('created', 'updated', 'comments')
        api-default: created
    :param str direction: accepted values: ('asc', 'desc')
        api-default: desc
    :param since: (optional), Only issues after this date will
        be returned. This can be a `datetime` or an ISO8601 formatted
        date string, e.g., 2012-05-20T23:10:27Z
    :type since: datetime or string
    :param int number: (optional), number of issues to return.
        Default: -1 returns all issues
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`ShortIssue <github3.issues.ShortIssue>`\ s
    """
    # Both halves of the repo slug are required; otherwise return nothing.
    if owner and repository:
        return gh.issues_on(
            owner,
            repository,
            milestone,
            state,
            assignee,
            mentioned,
            labels,
            sort,
            direction,
            since,
            number,
            etag,
        )
    return iter([])


@deprecated
def organizations_with(username, number=-1, etag=None):
    """List the organizations with ``username`` as a member.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.organizations_with` instead.

    :param str username: (required), login of the user
    :param int number: (optional), number of orgs to return. Default: -1,
        return all of the issues
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of
        :class:`ShortOrganization <github3.orgs.ShortOrganization>`
    """
    return gh.organizations_with(username, number, etag)
@deprecated
def repositories_by(
    username, type=None, sort=None, direction=None, number=-1, etag=None
):
    """List public repositories for the specified ``username``.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.repositories_by` instead.

    .. versionadded:: 0.6

    .. note:: This replaces github3.iter_repos

    :param str username: (required)
    :param str type: (optional), accepted values:
        ('all', 'owner', 'member')
        API default: 'all'
    :param str sort: (optional), accepted values:
        ('created', 'updated', 'pushed', 'full_name')
        API default: 'created'
    :param str direction: (optional), accepted values:
        ('asc', 'desc'), API default: 'asc' when using 'full_name',
        'desc' otherwise
    :param int number: (optional), number of repositories to return.
        Default: -1 returns all repositories
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Repository <github3.repos.Repository>`
        objects
    """
    # Bug fix: this previously tested `if login:` — the module-level `login`
    # function, which is always truthy — so an empty username was forwarded
    # to the API instead of short-circuiting like the sibling wrappers do.
    if username:
        return gh.repositories_by(
            username, type, sort, direction, number, etag
        )
    return iter([])
@deprecated
def starred_by(username, number=-1, etag=None):
    """Iterate over repositories starred by ``username``.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.starred_by` instead.

    :param str username: (optional), name of user whose stars you want to see
    :param int number: (optional), number of repositories to return.
        Default: -1 returns all repositories
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Repository <github3.repos.Repository>`
    """
    return gh.starred_by(username, number, etag)


@deprecated
def subscriptions_for(username, number=-1, etag=None):
    """Iterate over repositories subscribed to by ``username``.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.subscriptions_for` instead.

    :param str username: name of user whose subscriptions you want to see
    :param int number: (optional), number of repositories to return.
        Default: -1 returns all repositories
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Repository <github3.repos.Repository>`
    """
    return gh.subscriptions_for(username, number, etag)


@deprecated
def create_gist(description, files):
    """Create an anonymous public gist.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.create_gist` instead.

    :param str description: (required), short description of the gist
    :param dict files: (required), file names with associated
        dictionaries for content, e.g.
        {'spam.txt': {'content': 'File contents ...'}}
    :returns: :class:`Gist <github3.gists.Gist>`
    """
    return gh.create_gist(description, files)  # (No coverage)


@deprecated
def issue(owner, repository, number):
    """Anonymously gets issue :number on :owner/:repository.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.issue` instead.

    :param str owner: (required), repository owner
    :param str repository: (required), repository name
    :param int number: (required), issue number
    :returns: :class:`Issue <github3.issues.Issue>`
    """
    return gh.issue(owner, repository, number)
@deprecated
def markdown(text, mode="", context="", raw=False):
    """Render an arbitrary markdown document.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.markdown` instead.

    :param str text: (required), the text of the document to render
    :param str mode: (optional), 'markdown' or 'gfm'
    :param str context: (optional), only important when using mode 'gfm',
        this is the repository to use as the context for the rendering
    :param bool raw: (optional), renders a document like a README.md, no gfm,
        no context
    :returns: str -- HTML formatted text
    """
    return gh.markdown(text, mode, context, raw)


@deprecated
def octocat(say=None):
    """Return an easter egg from the API.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.octocat` instead.

    :params str say: (optional), pass in what you'd like Octocat to say
    :returns: ascii art of Octocat
    """
    return gh.octocat(say)


@deprecated
def organization(name):
    # Docstring is copied from the bound method right below.
    return gh.organization(name)


organization.__doc__ = gh.organization.__doc__


@deprecated
def pull_request(owner, repository, number):
    """Anonymously retrieve pull request :number on :owner/:repository.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.pull_request` instead.

    :param str owner: (required), repository owner
    :param str repository: (required), repository name
    :param int number: (required), pull request number
    :returns: :class:`PullRequest <github3.pulls.PullRequest>`
    """
    return gh.pull_request(owner, repository, number)


@deprecated
def rate_limit():
    # Docstring is copied from the bound method right below.
    return gh.rate_limit()


rate_limit.__doc__ = gh.rate_limit.__doc__


@deprecated
def repository(owner, repository):
    # NOTE(review): the `repository` parameter shadows this function's own
    # name; left as-is since renaming would change the keyword interface.
    return gh.repository(owner, repository)


repository.__doc__ = gh.repository.__doc__
@deprecated
def search_code(
    query,
    sort=None,
    order=None,
    per_page=None,
    text_match=False,
    number=-1,
    etag=None,
):
    """Find code via the code search API.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.search_code` instead.

    .. warning::

        You will only be able to make 5 calls with this or other search
        functions. To raise the rate-limit on this set of endpoints, create an
        authenticated :class:`GitHub <github3.github.GitHub>` Session with
        ``login``.

    The query can contain any combination of the following supported
    qualifiers:

    - ``in`` Qualifies which fields are searched. With this qualifier you
      can restrict the search to just the file contents, the file path, or
      both.
    - ``language`` Searches code based on the language it's written in.
    - ``fork`` Specifies that code from forked repositories should be
      searched. Repository forks will not be searchable unless the fork
      has more stars than the parent repository.
    - ``size`` Finds files that match a certain size (in bytes).
    - ``path`` Specifies the path that the resulting file must be at.
    - ``extension`` Matches files with a certain extension.
    - ``user`` or ``repo`` Limits searches to a specific user or
      repository.

    For more information about these qualifiers, see: http://git.io/-DvAuA

    :param str query: (required), a valid query as described above, e.g.,
        ``addClass in:file language:js repo:jquery/jquery``
    :param str sort: (optional), how the results should be sorted;
        option(s): ``indexed``; default: best match
    :param str order: (optional), the direction of the sorted results,
        options: ``asc``, ``desc``; default: ``desc``
    :param int per_page: (optional)
    :param bool text_match: (optional), if True, return matching search
        terms. See http://git.io/4ct1eQ for more information
    :param int number: (optional), number of repositories to return.
        Default: -1, returns all available repositories
    :param str etag: (optional), previous ETag header value
    :return: generator of :class:`CodeSearchResult
        <github3.search.CodeSearchResult>`
    """
    return gh.search_code(
        query, sort, order, per_page, text_match, number, etag
    )
@deprecated
def search_issues(
    query,
    sort=None,
    order=None,
    per_page=None,
    text_match=False,
    number=-1,
    etag=None,
):
    """Find issues by state and keyword.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.search_issues` instead.

    .. warning::

        You will only be able to make 5 calls with this or other search
        functions. To raise the rate-limit on this set of endpoints, create an
        authenticated :class:`GitHub <github3.github.GitHub>` Session with
        ``login``.

    The query can contain any combination of the following supported
    qualifiers:

    - ``type`` With this qualifier you can restrict the search to issues or
      pull request only.
    - ``in`` Qualifies which fields are searched. With this qualifier you can
      restrict the search to just the title, body, comments, or any
      combination of these.
    - ``author`` Finds issues created by a certain user.
    - ``assignee`` Finds issues that are assigned to a certain user.
    - ``mentions`` Finds issues that mention a certain user.
    - ``commenter`` Finds issues that a certain user commented on.
    - ``involves`` Finds issues that were either created by a certain user,
      assigned to that user, mention that user, or were commented on by that
      user.
    - ``state`` Filter issues based on whether they're open or closed.
    - ``labels`` Filters issues based on their labels.
    - ``language`` Searches for issues within repositories that match a
      certain language.
    - ``created`` or ``updated`` Filters issues based on times of creation, or
      when they were last updated.
    - ``comments`` Filters issues based on the quantity of comments.
    - ``user`` or ``repo`` Limits searches to a specific user or repository.

    For more information about these qualifiers, see: http://git.io/d1oELA

    :param str query: (required), a valid query as described above, e.g.,
        ``windows label:bug``
    :param str sort: (optional), how the results should be sorted;
        options: ``created``, ``comments``, ``updated``; default: best match
    :param str order: (optional), the direction of the sorted results,
        options: ``asc``, ``desc``; default: ``desc``
    :param int per_page: (optional)
    :param bool text_match: (optional), if True, return matching search
        terms. See http://git.io/QLQuSQ for more information
    :param int number: (optional), number of issues to return.
        Default: -1, returns all available issues
    :param str etag: (optional), previous ETag header value
    :return: generator of :class:`IssueSearchResult
        <github3.search.IssueSearchResult>`
    """
    # Deprecated module-level shim: delegate to the bound GitHub session.
    return gh.search_issues(
        query, sort, order, per_page, text_match, number, etag
    )
@deprecated
def search_repositories(
    query,
    sort=None,
    order=None,
    per_page=None,
    text_match=False,
    number=-1,
    etag=None,
):
    """Find repositories via various criteria.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.search_repositories` instead.

    .. warning::

        You will only be able to make 5 calls with this or other search
        functions. To raise the rate-limit on this set of endpoints, create an
        authenticated :class:`GitHub <github3.github.GitHub>` Session with
        ``login``.

    The query can contain any combination of the following supported
    qualifiers:

    - ``in`` Qualifies which fields are searched. With this qualifier you
      can restrict the search to just the repository name, description,
      readme, or any combination of these.
    - ``size`` Finds repositories that match a certain size (in
      kilobytes).
    - ``forks`` Filters repositories based on the number of forks, and/or
      whether forked repositories should be included in the results at
      all.
    - ``created`` or ``pushed`` Filters repositories based on times of
      creation, or when they were last updated. Format: ``YYYY-MM-DD``.
      Examples: ``created:<2011``, ``pushed:<2013-02``,
      ``pushed:>=2013-03-06``
    - ``user`` or ``repo`` Limits searches to a specific user or
      repository.
    - ``language`` Searches repositories based on the language they're
      written in.
    - ``stars`` Searches repositories based on the number of stars.

    For more information about these qualifiers, see: http://git.io/4Z8AkA

    :param str query: (required), a valid query as described above, e.g.,
        ``tetris language:assembly``
    :param str sort: (optional), how the results should be sorted;
        options: ``stars``, ``forks``, ``updated``; default: best match
    :param str order: (optional), the direction of the sorted results,
        options: ``asc``, ``desc``; default: ``desc``
    :param int per_page: (optional)
    :param bool text_match: (optional), if True, return matching search
        terms. See http://git.io/4ct1eQ for more information
    :param int number: (optional), number of repositories to return.
        Default: -1, returns all available repositories
    :param str etag: (optional), previous ETag header value
    :return: generator of :class:`Repository <github3.repos.Repository>`
    """
    # Deprecated module-level shim: delegate to the bound GitHub session.
    return gh.search_repositories(
        query, sort, order, per_page, text_match, number, etag
    )
@deprecated
def search_users(
    query,
    sort=None,
    order=None,
    per_page=None,
    text_match=False,
    number=-1,
    etag=None,
):
    """Find users via the Search API.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.search_users` instead.

    .. warning::

        You will only be able to make 5 calls with this or other search
        functions. To raise the rate-limit on this set of endpoints, create an
        authenticated :class:`GitHub <github3.github.GitHub>` Session with
        ``login``.

    The query can contain any combination of the following supported
    qualifiers:

    - ``type`` With this qualifier you can restrict the search to just
      personal accounts or just organization accounts.
    - ``in`` Qualifies which fields are searched. With this qualifier you
      can restrict the search to just the username, public email, full
      name, or any combination of these.
    - ``repos`` Filters users based on the number of repositories they
      have.
    - ``location`` Filter users by the location indicated in their
      profile.
    - ``language`` Search for users that have repositories that match a
      certain language.
    - ``created`` Filter users based on when they joined.
    - ``followers`` Filter users based on the number of followers they
      have.

    For more information about these qualifiers see: http://git.io/wjVYJw

    :param str query: (required), a valid query as described above, e.g.,
        ``tom repos:>42 followers:>1000``
    :param str sort: (optional), how the results should be sorted;
        options: ``followers``, ``repositories``, or ``joined``; default:
        best match
    :param str order: (optional), the direction of the sorted results,
        options: ``asc``, ``desc``; default: ``desc``
    :param int per_page: (optional)
    :param bool text_match: (optional), if True, return matching search
        terms. See http://git.io/_V1zRwa for more information
    :param int number: (optional), number of search results to return;
        Default: -1 returns all available
    :param str etag: (optional), ETag header value of the last request.
    :return: generator of :class:`UserSearchResult
        <github3.search.UserSearchResult>`
    """
    # Deprecated module-level shim: delegate to the bound GitHub session.
    return gh.search_users(
        query, sort, order, per_page, text_match, number, etag
    )
@deprecated
def user(username):
    # Deprecated thin wrapper; the docstring is copied from the bound method
    # right below so help() shows the real documentation.
    return gh.user(username)


user.__doc__ = gh.user.__doc__
@deprecated
def zen():
    """Return a quote from the Zen of GitHub. Yet another API Easter Egg.

    .. deprecated:: 1.2.0

        Use :meth:`github3.github.GitHub.zen` instead.

    :returns: str
    """
    return gh.zen()
|
"""
SoftLayer.tests.CLI.modules.subnet_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import json
from unittest import mock as mock
import SoftLayer
from SoftLayer.fixtures import SoftLayer_Product_Order
from SoftLayer.fixtures import SoftLayer_Product_Package
from SoftLayer import testing
class SubnetTests(testing.TestCase):
    """CLI tests for the ``subnet`` command group (detail/list/create/edit/
    cancel/route/lookup), driven through ``run_command`` with mocked API
    responses from the SoftLayer fixtures."""

    def test_detail(self):
        # 'subnet detail' emits the subnet record as JSON.
        result = self.run_command(['subnet', 'detail', '1234'])
        subnet = json.loads(result.output)
        self.assert_no_fail(result)
        self.assertEqual(subnet.get('id'), 1234)
        self.assertEqual(subnet.get('identifier'), '1.2.3.4/26')

    def test_list(self):
        result = self.run_command(['subnet', 'list'])
        self.assert_no_fail(result)

    @mock.patch('SoftLayer.CLI.formatting.confirm')
    def test_create_subnet_ipv4(self, confirm_mock):
        # Confirmation prompt is auto-accepted; order placement is mocked.
        confirm_mock.return_value = True
        item_mock = self.set_mock('SoftLayer_Product_Package', 'getItems')
        item_mock.return_value = SoftLayer_Product_Package.getItems
        place_mock = self.set_mock('SoftLayer_Product_Order', 'placeOrder')
        place_mock.return_value = SoftLayer_Product_Order.placeOrder
        result = self.run_command(['subnet', 'create', 'private', '8', '12346'])
        self.assert_no_fail(result)
        output = [
            {'Item': 'Total monthly cost', 'cost': '0.00'}
        ]
        self.assertEqual(output, json.loads(result.output))

    @mock.patch('SoftLayer.CLI.formatting.confirm')
    def test_create_subnet_ipv6(self, confirm_mock):
        # --test routes through verifyOrder instead of placeOrder.
        confirm_mock.return_value = True
        item_mock = self.set_mock('SoftLayer_Product_Package', 'getItems')
        item_mock.return_value = SoftLayer_Product_Package.getItems
        place_mock = self.set_mock('SoftLayer_Product_Order', 'verifyOrder')
        place_mock.return_value = SoftLayer_Product_Order.verifyOrder
        result = self.run_command(['subnet', 'create', '--v6', 'public', '64', '12346', '--test'])
        self.assert_no_fail(result)
        output = [
            {'Item': 'this is a thing', 'cost': '2.00'},
            {'Item': 'Total monthly cost', 'cost': '2.00'}
        ]
        self.assertEqual(output, json.loads(result.output))

    def test_create_subnet_no_prices_found(self):
        # verifyOrder raising means no price matched; the CLI should surface
        # a friendly "Unable to order" message.
        item_mock = self.set_mock('SoftLayer_Product_Package', 'getItems')
        item_mock.return_value = SoftLayer_Product_Package.getItems
        verify_mock = self.set_mock('SoftLayer_Product_Order', 'verifyOrder')
        verify_mock.side_effect = SoftLayer.SoftLayerAPIError('SoftLayer_Exception', 'Price not found')
        result = self.run_command(['subnet', 'create', '--v6', 'public', '32', '12346', '--test'])
        self.assertRaises(SoftLayer.SoftLayerAPIError, verify_mock)
        self.assertIn('Unable to order 32 public ipv6', result.exception.message, )

    @mock.patch('SoftLayer.CLI.formatting.confirm')
    def test_create_subnet_static(self, confirm_mock):
        confirm_mock.return_value = True
        item_mock = self.set_mock('SoftLayer_Product_Package', 'getItems')
        item_mock.return_value = SoftLayer_Product_Package.getItems
        place_mock = self.set_mock('SoftLayer_Product_Order', 'placeOrder')
        place_mock.return_value = SoftLayer_Product_Order.placeOrder
        result = self.run_command(['subnet', 'create', 'static', '2', '12346'])
        self.assert_no_fail(result)
        output = [
            {'Item': 'Total monthly cost', 'cost': '0.00'}
        ]
        self.assertEqual(output, json.loads(result.output))

    @mock.patch('SoftLayer.CLI.formatting.confirm')
    def test_create_subnet_static_ipv6(self, confirm_mock):
        confirm_mock.return_value = True
        item_mock = self.set_mock('SoftLayer_Product_Package', 'getItems')
        item_mock.return_value = SoftLayer_Product_Package.getItems
        place_mock = self.set_mock('SoftLayer_Product_Order', 'verifyOrder')
        place_mock.return_value = SoftLayer_Product_Order.verifyOrder
        result = self.run_command(['subnet', 'create', '--v6', 'static', '64', '12346', '--test'])
        self.assert_no_fail(result)
        output = [
            {'Item': 'this is a thing', 'cost': '2.00'},
            {'Item': 'Total monthly cost', 'cost': '2.00'}
        ]
        self.assertEqual(output, json.loads(result.output))

    @mock.patch('SoftLayer.CLI.subnet.edit.click')
    def test_subnet_set_tags(self, click):
        result = self.run_command(['subnet', 'edit', '1234', '--tags=tag1,tag2'])
        click.secho.assert_called_with('Set tags successfully', fg='green')
        self.assert_no_fail(result)
        self.assert_called_with('SoftLayer_Network_Subnet', 'setTags', identifier=1234, args=("tag1,tag2",))

    @mock.patch('SoftLayer.CLI.subnet.edit.click')
    def test_subnet_edit_note(self, click):
        result = self.run_command(['subnet', 'edit', '1234', '--note=test'])
        click.secho.assert_called_with('Edit note successfully', fg='green')
        self.assert_no_fail(result)
        self.assert_called_with('SoftLayer_Network_Subnet', 'editNote', identifier=1234, args=("test",))

    @mock.patch('SoftLayer.CLI.subnet.edit.click')
    def test_subnet_set_tags_failure(self, click):
        # NOTE: the local name ``mock`` shadows the imported ``mock`` module
        # for the rest of this method.
        mock = self.set_mock('SoftLayer_Network_Subnet', 'setTags')
        mock.return_value = False
        result = self.run_command(['subnet', 'edit', '1234', '--tags=tag1,tag2'])
        click.secho.assert_called_with('Failed to set tags', fg='red')
        self.assert_no_fail(result)
        self.assert_called_with('SoftLayer_Network_Subnet', 'setTags', identifier=1234, args=("tag1,tag2",))

    @mock.patch('SoftLayer.CLI.subnet.edit.click')
    def test_edit_note_failure(self, click):
        # NOTE: the local name ``mock`` shadows the imported ``mock`` module
        # for the rest of this method.
        mock = self.set_mock('SoftLayer_Network_Subnet', 'editNote')
        mock.return_value = False
        result = self.run_command(['subnet', 'edit', '1234', '--note=test'])
        click.secho.assert_called_with('Failed to edit note', fg='red')
        self.assert_no_fail(result)
        self.assert_called_with('SoftLayer_Network_Subnet', 'editNote', identifier=1234, args=("test",))

    def test_editrou_Ip(self):
        # 'subnet edit-ip' accepts a dotted-quad identifier.
        result = self.run_command(['subnet', 'edit-ip', '16.26.26.26', '--note=test'])
        self.assert_no_fail(result)
        self.assertTrue(result)

    def test_editrou_Id(self):
        # 'subnet edit-ip' also accepts a numeric id.
        result = self.run_command(['subnet', 'edit-ip', '123456', '--note=test'])
        self.assert_no_fail(result)
        self.assertTrue(result)

    def test_lookup(self):
        result = self.run_command(['subnet', 'lookup', '1.2.3.10'])
        self.assert_no_fail(result)
        self.assertEqual(json.loads(result.output), {'device': {
            'id': 12856,
            'name': 'unit.test.com',
            'type': 'server'},
            "id": 12345,
            "ip": "10.0.1.37",
            "subnet": {
                "id": 258369,
                "identifier": "10.0.1.38/26",
                "netmask": "255.255.255.192",
                "gateway": "10.47.16.129",
                "type": "PRIMARY"
            }})

    @mock.patch('SoftLayer.CLI.formatting.no_going_back')
    def test_cancel(self, confirm_mock):
        confirm_mock.return_value = True
        result = self.run_command(['subnet', 'cancel', '1234'])
        self.assert_no_fail(result)

    def test_cancel_fail(self):
        # Without the confirmation patched, cancel aborts with exit code 2.
        result = self.run_command(['subnet', 'cancel', '1234'])
        self.assertEqual(result.exit_code, 2)

    def test_route(self):
        result = self.run_command(['subnet', 'route', '1'])
        self.assert_no_fail(result)
        self.assertEqual(result.exit_code, 0)
|
"""Data loaders for summarization datasets."""
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import types as lit_types
import tensorflow_datasets as tfds
class GigawordData(lit_dataset.Dataset):
    """English Gigaword summarization dataset."""

    def __init__(self, split="validation", max_examples=-1):
        """Dataset constructor, loads the data into memory.

        Args:
          split: TFDS split name to load.
          max_examples: number of records to keep; -1 makes take() yield the
            whole split.
        """
        ds = tfds.load("gigaword", split=split)
        self._examples = []  # populate this with data records
        for record in ds.take(max_examples):
            input_text = record["document"].numpy().decode("utf-8")
            output_text = record["summary"].numpy().decode("utf-8")
            self._examples.append({
                "input_text": input_text,
                "output_text": output_text,
            })

    def spec(self) -> lit_types.Spec:
        """Dataset spec, which should match the model's input_spec()."""
        return {
            "input_text": lit_types.TextSegment(),
            "output_text": lit_types.TextSegment(),
        }
class CNNDMData(lit_dataset.Dataset):
    """English CNNDM summarization dataset."""

    def __init__(self, split="validation", max_examples=-1, max_seq_len=500):
        """Dataset constructor, loads the data into memory.

        Args:
          split: TFDS split name to load.
          max_examples: number of records to keep; -1 makes take() yield the
            whole split.
          max_seq_len: keep only the last max_seq_len whitespace tokens of
            each article.
        """
        ds = tfds.load("cnn_dailymail", split=split)
        self._examples = []  # populate this with data records
        for record in ds.take(max_examples):
            # format and truncate from the end to max_seq_len tokens.
            input_text = " ".join(
                record["article"].numpy()\
                .decode("utf-8")\
                .replace("<br />", "")\
                .split()[-max_seq_len:])
            output_text = record["highlights"].numpy().decode("utf-8")
            self._examples.append({
                "input_text": input_text,
                "output_text": output_text,
            })

    def spec(self) -> lit_types.Spec:
        """Dataset spec, which should match the model's input_spec()."""
        return {
            "input_text": lit_types.TextSegment(),
            "output_text": lit_types.TextSegment(),
        }
|
# data from EMSL: https://bse.pnl.gov/bse/portal
# 4-31G EMSL Basis Set Exchange Library 11/9/12 10:13 AM
# Elements References
# -------- ----------
# H, C - F: R. Ditchfield, W.J. Hehre and J.A. Pople, J. Chem. Phys. 54, 724
# (1971).
# He, Ne: Gaussian 90
# Li, Be: These are actually 5-21G basis sets.
# Na - Ar: M.S. Gordon, J.S. Binkley, J.A. Pople, W.J. Pietro and W.J. Hehre,
# J. Am. Chem. Soc. 104, 2797 (1983).
#
# Each element symbol maps to a list of contracted shells.  A shell is
#   [angular_momentum, (exponent, contraction_coefficient), ...]
# where angular momentum 0 = s and 1 = p, in the 4-31G basis format.

# Hydrogen
H = [[0,
      (18.7311370, 0.0334946),
      (2.8253944, 0.2347269),
      (0.6401217, 0.8137573),],
     [0,
      (0.1612778, 1.0000000),]]
# Helium
He = [[0,
       (38.4216340, 0.0237660),
       (5.7780300, 0.1546790),
       (1.2417740, 0.4696300),],
      [0,
       (0.2979640, 1.0000000),]]
# Lithium (actually a 5-21G set; see references above)
Li = [[0,
       (275.3944400, 0.00612185),
       (41.4351750, 0.04511296),
       (9.3669938, 0.19269415),
       (2.5377253, 0.46854421),
       (0.7466365, 0.44060752),],
      [0,
       (0.7345643, -0.25253680),
       (0.0871980, 1.09734080),],
      [0,
       (0.0404387, 1.0000000),],
      [1,
       (0.7345643, 0.14359173),
       (0.0871980, 0.94780305),],
      [1,
       (0.0404387, 1.0000000),]]
# Beryllium (actually a 5-21G set; see references above)
Be = [[0,
       (554.0100000, 0.00540997),
       (83.2631000, 0.04025150),
       (18.8635000, 0.17685800),
       (5.1778200, 0.45255900),
       (1.5560200, 0.47029300),],
      [0,
       (1.4417524910, -0.4774290),
       (0.3018610597, 1.2474500),],
      [0,
       (0.1009613875, 1.0000000),],
      [1,
       (1.4417524910, 0.2011420),
       (0.3018610597, 0.8844830),],
      [1,
       (0.1009613875, 1.0000000),]]
# Boron
B = [[0,
      (330.7528500, 0.0179942),
      (49.8438650, 0.1246937),
      (11.1170540, 0.4343354),
      (2.9227243, 0.5609794),],
     [0,
      (5.6812646, -0.1303871),
      (1.4544046, -0.2514344),
      (0.4283786, 1.2051292),],
     [0,
      (0.1442192, 1.0000000),],
     [1,
      (5.6812646, 0.0637429),
      (1.4544046, 0.2761331),
      (0.4283786, 0.7773866),],
     [1,
      (0.1442192, 1.0000000),]]
# Carbon
C = [[0,
      (486.9669300, 0.0177258),
      (73.3710940, 0.1234787),
      (16.4134580, 0.4338754),
      (4.3449836, 0.5615042),],
     [0,
      (8.6735253, -0.1213837),
      (2.0966193, -0.2273385),
      (0.6046513, 1.1851739),],
     [0,
      (0.1835578, 1.0000000),],
     [1,
      (8.6735253, 0.0635454),
      (2.0966193, 0.2982678),
      (0.6046513, 0.7621032),],
     [1,
      (0.1835578, 1.0000000),]]
# Nitrogen
N = [[0,
      (671.2795000, 0.0175982511),
      (101.2017000, 0.1228462410),
      (22.6999700, 0.4337821410),
      (6.0406090, 0.5614182170),],
     [0,
      (12.3935997, -0.1174892990),
      (2.9223828, -0.2139940160),
      (0.83252808, 1.1745021100),],
     [0,
      (0.2259640, 1.0000000),],
     [1,
      (12.3935997, 0.0640203443),
      (2.9223828, 0.3112025550),
      (0.83252808, 0.7527482390),],
     [1,
      (0.2259640, 1.0000000),]]
# Oxygen
O = [[0,
      (883.2728600, 0.0175506),
      (133.1292800, 0.1228292),
      (29.9064080, 0.4348836),
      (7.9786772, 0.5600108),],
     [0,
      (16.1944470, -0.1134010),
      (3.7800860, -0.1772865),
      (1.0709836, 1.1504079),],
     [0,
      (0.2838798, 1.0000000),],
     [1,
      (16.1944470, 0.0685453),
      (3.7800860, 0.3312254),
      (1.0709836, 0.7346079),],
     [1,
      (0.2838798, 1.0000000),]]
# Fluorine
F = [[0,
      (1126.1630000, 0.0174758),
      (169.7432000, 0.1225230),
      (38.1815100, 0.4349990),
      (10.2120400, 0.5598120),],
     [0,
      (21.4953700, -0.1110570),
      (4.9897780, -0.1683220),
      (1.4035740, 1.1436260),],
     [0,
      (0.3730318, 1.0000000),],
     [1,
      (21.4953700, 0.0698880),
      (4.9897780, 0.3393880),
      (1.4035740, 0.7279590),],
     [1,
      (0.3730318, 1.0000000),]]
# Neon
Ne = [[0,
       (1397.9321000, 0.017423805),
       (210.7697800, 0.122272745),
       (47.4672570, 0.435014232),
       (12.7226260, 0.559714642),],
      [0,
       (27.2130330, -0.109609439),
       (6.2941344, -0.164124890),
       (1.7600513, 1.140151590),],
      [0,
       (0.4618670, 1.0000000),],
      [1,
       (27.2130330, 0.070440307),
       (6.2941344, 0.343993047),
       (1.7600513, 0.724514960),],
      [1,
       (0.4618670, 1.0000000),]]
# Phosphorus
P = [[0,
      (3018.6718000, 0.0185213137),
      (455.1271210, 0.129904864),
      (102.3147300, 0.455100288),
      (27.61784730, 0.533131861),],
     [0,
      (114.4294010, -0.0247502961),
      (26.58229590, -0.1350924600),
      (7.871888900, 0.2277360800),
      (2.487857250, 0.8755931160),],
     [0,
      (50.75061900, -0.045119223),
      (1.672862420, -0.850472990),
      (0.621097412, 1.596285850),],
     [0,
      (0.167016007, 1.0000000),],
     [1,
      (114.4294010, 0.0274140025),
      (26.58229590, 0.169079142),
      (7.871888900, 0.469102089),
      (2.487857250, 0.518153059),],
     [1,
      (50.75061900, 0.00377907118),
      (1.672862420, -0.04634384050),
      (0.621097412, 1.03394429000),],
     [1,
      (0.167016007, 1.0000000),]]
# Sulfur
S = [[0,
      (3442.1244000, 0.0184921),
      (518.9131000, 0.1298220),
      (116.6909000, 0.4550418),
      (31.5716470, 0.5330084),],
     [0,
      (127.4405800, -0.0272646),
      (29.7476670, -0.1424834),
      (8.8346642, 0.2597043),
      (2.8173898, 0.8525473),],
     [0,
      (3.7291854, -0.2775315),
      (1.4067702, -0.4576435),
      (0.5481100, 1.4316843),],
     [0,
      (0.1703809, 1.0000000),],
     [1,
      (127.4405800, 0.0291520),
      (29.7476670, 0.1779597),
      (8.8346642, 0.4836237),
      (2.8173898, 0.4942553),],
     [1,
      (3.7291854, -0.0337509),
      (1.4067702, 0.1457110),
      (0.5481100, 0.8982887),],
     [1,
      (0.1703809, 1.0000000),]]
# Chlorine
Cl = [[0,
       (3910.3026000, 0.0183794),
       (589.5518000, 0.1291401),
       (132.5939200, 0.4540448),
       (35.9035420, 0.5344394),],
      [0,
       (147.7653500, -0.0267433),
       (34.5060750, -0.1446911),
       (10.2864710, 0.2517035),
       (3.3111473, 0.8598203),],
      [0,
       (4.280284910, -0.2703963),
       (1.641016670, -0.3416297),
       (0.614478503, 1.3500245),],
      [0,
       (0.195659411, 1.0000000),],
      [1,
       (147.7653500, 0.0288645),
       (34.5060750, 0.1779647),
       (10.2864710, 0.4869998),
       (3.3111473, 0.4890184),],
      [1,
       (4.280284910, -0.0367028),
       (1.641016670, 0.1918492),
       (0.614478503, 0.8643376),],
      [1,
       (0.195659411, 1.0000000),]]
|
import os
import sys
import urllib
import urllib.request
import zipfile
class DatasetExplorer:
    """Scans local datasets, downloads missing ones, and unpacks archives.

    Create a single instance of this class when needed; do not modify the
    instance from outside (translated from the original Turkish docstring).
    """

    def __init__(self, demanded_datasets, path_dict):
        # path_dict is expected to provide 'DATASETS_FOLDER' and
        # 'DOWNLOADS_FOLDER' entries.
        self.to_be_used_datasets = demanded_datasets
        self.path_dict = path_dict
        self.local_datasets = []
        self.download_queue = []
        self.install_queue = []

    def scan(self):
        """Scan the datasets folder and queue missing datasets for download.

        :return: list of locally available dataset names, or None on error
        """
        try:
            # phase one -> scan local datasets dir
            for dataset_folder in os.scandir(self.path_dict['DATASETS_FOLDER']):
                if not dataset_folder.name.startswith('.') and dataset_folder.is_dir():
                    self.local_datasets.append(dataset_folder.name)
                    print("Local dataset found : ", dataset_folder.name, 'Folder size',
                          self.get_tree_size(
                              os.path.join(self.path_dict['DATASETS_FOLDER'],
                                           dataset_folder.name)) / 10 ** 6,
                          'MB')
            # phase two -> queue every demanded dataset that is not local
            for dataset in self.to_be_used_datasets:
                if dataset not in self.local_datasets:
                    print(dataset, ' verisetinin bilgisayarınızda yüklü olmadığı görüldü. İndirilecek.')
                    self.download_queue.append(dataset)
            print("Eğer bir verisetinin yanlış indirildiğini düşünüyorsanız, "
                  "verisetini silip programı tekrar çalıştırın.")
            return self.local_datasets
        except (OSError, KeyError):
            # Was a bare ``except:`` which also swallowed programming errors;
            # only filesystem problems and missing path keys are expected here.
            print("Dataset Okuma sırasında bir hata oluşmuş olabilir.")

    def download_datasets(self):
        """Download every queued dataset archive and extract it in place."""
        if not self.download_queue:
            print("İstenen verisetleri bilgisayarınızda yüklü.. \nBir sonraki adıma geçiliyor..")
            return
        # Paths are loop-invariant; look them up once (the original re-read
        # them on every iteration and had a dead assignment that pointed the
        # download path at the datasets folder before overwriting it).
        downloads_path = self.path_dict['DOWNLOADS_FOLDER']
        datasets_path = self.path_dict['DATASETS_FOLDER']
        for dataset in self.download_queue:
            print('{} indiriliyor'.format(dataset))
            dataset_zip_name = '{}.zip'.format(dataset)
            dataset_download_path = os.path.join(downloads_path, dataset_zip_name)
            download_url = 'https://audio-sentiment-analysis-tool-cloud-storage.s3.eu-central-1.amazonaws.com/{}'.format(
                dataset_zip_name)
            print('{} adresinden indirme işlemi gerçekleşiyor..'.format(download_url))
            # Requires ``import urllib.request``; a plain ``import urllib``
            # does not guarantee the ``request`` submodule is loaded.
            urllib.request.urlretrieve(download_url, dataset_download_path)
            print('{} arşivden çıkartılıyor ve kopyalanıyor..'.format(dataset))
            with zipfile.ZipFile(dataset_download_path, 'r') as zip_ref:
                zip_ref.extractall(os.path.join(datasets_path))
        # TODO-> implement install and unzip datasets

    def get_tree_size(self, path):
        """Return the size in bytes of *path* including all subdirectories.

        (Translated from the original Turkish docstring.)
        """
        total = 0
        for entry in os.scandir(path):
            try:
                is_dir = entry.is_dir(follow_symlinks=False)
            except OSError as error:
                print('Error calling is_dir():', error, file=sys.stderr)
                continue
            if is_dir:
                total += self.get_tree_size(entry.path)
            else:
                try:
                    total += entry.stat(follow_symlinks=False).st_size
                except OSError as error:
                    print('Error calling stat():', error, file=sys.stderr)
        return total
|
from torchvision import models
from torch import nn
import torch
import torch.nn.functional as F
class FCN_ResNet18(nn.Module):
    """Fully convolutional segmentation head on a ResNet-18 backbone.

    Features from four backbone stages are upsampled back to the input
    resolution, concatenated channel-wise, and mapped to ``n_class`` output
    channels by a 1x1 convolution.
    """

    def __init__(self, n_class):
        super().__init__()
        # pretrained=False: weights are expected to be loaded or trained
        # elsewhere.
        base_model = models.resnet18(pretrained=False)
        self.layers = list(base_model.children())
        layers=self.layers
        self.layer1 = nn.Sequential(*layers[:5])  # size=(N, 64, x.H/2, x.W/2)
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
        self.layer2 = layers[5]  # size=(N, 128, x.H/4, x.W/4)
        self.upsample2 = nn.Upsample(scale_factor=8, mode='bilinear')
        self.layer3 = layers[6]  # size=(N, 256, x.H/8, x.W/8)
        self.upsample3 = nn.Upsample(scale_factor=16, mode='bilinear')
        self.layer4 = layers[7]  # size=(N, 512, x.H/16, x.W/16)
        self.upsample4 = nn.Upsample(scale_factor=32, mode='bilinear')
        # 1x1 conv over the concatenation of all four upsampled stages.
        self.conv1k = nn.Conv2d(64 + 128 + 256 + 512, n_class, 1)
        # NOTE(review): sigmoid is constructed but unused in forward (the
        # sigmoid call there is commented out) — raw logits are returned.
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.layer1(x)
        up1 = self.upsample1(x)
        x = self.layer2(x)
        up2 = self.upsample2(x)
        x = self.layer3(x)
        up3 = self.upsample3(x)
        x = self.layer4(x)
        up4 = self.upsample4(x)
        # Fuse multi-scale features, then project to class channels.
        merge = torch.cat([up1, up2, up3, up4], dim=1)
        merge = self.conv1k(merge)
        #out = self.sigmoid(merge)
        out=merge
        return out
class Fcn_Vgg16(nn.Module):
    """FCN-32s / FCN-16s / FCN-8s segmentation network on a VGG16 backbone.

    ``module_type`` selects which skip connections are used:
    '32s' = none, '16s' = pool4 skip, '8s' = pool4 + pool3 skips.
    """

    def __init__(self, module_type='32s', n_classes=1, pretrained=True):
        super(Fcn_Vgg16, self).__init__()
        self.n_classes = n_classes
        self.module_type = module_type
        # VGG16 = 2+2+3+3+3+3 layers; the blocks below mirror vgg16.features.
        # Block 1: two 64-channel convolutions.
        self.conv1_block = nn.Sequential(
            # 3 in / 64 out channels, 3x3 kernel; padding=100 is the standard
            # FCN trick so the final crop/upsample covers the full input.
            nn.Conv2d(3, 64, 3, padding=100),
            nn.ReLU(inplace=True),  # inplace saves memory
            nn.Conv2d(64, 64, 3, padding=1),
            nn.ReLU(inplace=True),
            # 2x2 max pool, stride 2, rounding spatial size up.
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )
        # Block 2: two 128-channel convolutions.
        self.conv2_block = nn.Sequential(
            nn.Conv2d(64, 128, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )
        # Block 3: three 256-channel convolutions.
        self.conv3_block = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )
        # Block 4: three 512-channel convolutions.
        self.conv4_block = nn.Sequential(
            nn.Conv2d(256, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )
        # Block 5: three 512-channel convolutions.
        self.conv5_block = nn.Sequential(
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )
        # Fully-convolutional classifier replacing VGG's dense layers.
        self.classifier = nn.Sequential(
            nn.Conv2d(512, 4096, 7),
            nn.ReLU(inplace=True),
            nn.Dropout2d(),
            nn.Conv2d(4096, 4096, 1),
            nn.ReLU(inplace=True),
            nn.Dropout2d(),
            nn.Conv2d(4096, self.n_classes, 1),
        )
        # Skip-connection scorers, only built for the variants that use them.
        if self.module_type=='16s' or self.module_type=='8s':
            self.score_pool4 = nn.Conv2d(512, self.n_classes, 1)
        if self.module_type=='8s':
            self.score_pool3 = nn.Conv2d(256, self.n_classes, 1)
        if pretrained:
            self.init_vgg16()

    def init_vgg16(self):
        """Copy pretrained VGG16 weights into the matching FCN layers."""
        vgg16 = models.vgg16(pretrained=True)  # already-trained reference model
        # ----- copy the 2+2+3+3+3 feature-extraction layers -----
        # vgg16.features is a Sequential; children() yields its layers.
        vgg16_features = list(vgg16.features.children())
        #print(vgg16_features)
        conv_blocks = [self.conv1_block, self.conv2_block, self.conv3_block, self.conv4_block, self.conv5_block]
        # Index ranges of the five conv blocks inside vgg16.features.
        conv_ids_vgg = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 30]]
        for conv_block_id, conv_block in enumerate(conv_blocks):
            #print(conv_block_id)
            conv_id_vgg = conv_ids_vgg[conv_block_id]
            #print(conv_id_vgg)
            # zip pairs each FCN layer with the corresponding VGG layer so
            # trained weights can be copied across.
            for l1, l2 in zip(conv_block, vgg16_features[conv_id_vgg[0]:conv_id_vgg[1]]):
                if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
                    assert l1.weight.size() == l2.weight.size()
                    assert l1.bias.size() == l2.bias.size()
                    # Copy the trained VGG weights into the FCN layer.
                    l1.weight.data = l2.weight.data
                    l1.bias.data = l2.bias.data
                    # print(l1)
                    # print(l2)
        # ----- copy the first classifier layers (Linear -> Conv reshaped) -----
        vgg16_classifier = list(vgg16.classifier.children())
        for l1, l2 in zip(self.classifier, vgg16_classifier[0:3]):
            if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Linear):
                l1.weight.data = l2.weight.data.view(l1.weight.size())
                l1.bias.data = l2.bias.data.view(l1.bias.size())
        # ----- last classifier layer: class counts differ, so truncate -----
        l1 = self.classifier[6]
        l2 = vgg16_classifier[6]
        if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Linear):
            l1.weight.data = l2.weight.data[:self.n_classes, :].view(l1.weight.size())
            l1.bias.data = l2.bias.data[:self.n_classes].view(l1.bias.size())

    def forward(self, x):
        '''
        Run the backbone, classifier and (optionally) skip fusions.

        :param x: input batch, e.g. shape (1, 3, 360, 480)
        :return: per-class score map upsampled to the input spatial size
        '''
        conv1 = self.conv1_block(x)
        conv2 = self.conv2_block(conv1)
        conv3 = self.conv3_block(conv2)
        conv4 = self.conv4_block(conv3)
        conv5 = self.conv5_block(conv4)
        score = self.classifier(conv5)
        #print('score', score.shape) #[1, 21, 12, 16]
        if self.module_type=='16s' or self.module_type=='8s':
            score_pool4 = self.score_pool4(conv4)  # e.g. [1, 21, 35, 43]
            #print('pool4',score_pool4.shape)
        if self.module_type=='8s':
            score_pool3 = self.score_pool3(conv3)  # e.g. [1, 21, 70, 85]
            #print('pool3', score_pool3.shape)
        # print(conv1.data.size())
        # print(conv2.data.size())
        # print(conv4.data.size())
        # print(conv5.data.size())
        # print(score.data.size())
        # print(x.data.size())
        if self.module_type=='16s' or self.module_type=='8s':
            # Bilinear interpolation up to the pool4 score-map size, then fuse.
            score = F.interpolate(score, score_pool4.size()[2:], mode='bilinear', align_corners=True)
            score += score_pool4
            if self.module_type=='8s':
                # Bilinear interpolation up to the pool3 score-map size, then fuse.
                score = F.interpolate(score, score_pool3.size()[2:], mode='bilinear', align_corners=True)
                score += score_pool3
        # Bilinear interpolation back to the input spatial size.
        out = F.interpolate(score, x.size()[2:], mode='bilinear', align_corners=True)
        # sigmoid=nn.Sigmoid()
        # out=sigmoid(out)
        return out
class VGG16(nn.Module):
    """Experimental VGG16-based wrapper mirroring FCN_ResNet18's structure.

    NOTE(review): only ``layer1`` is actually built — the remaining layers,
    upsamplers and ``conv1k`` are commented out in ``__init__`` while
    ``forward`` still references them, so calling ``forward`` will raise
    ``AttributeError``. Only layer inspection (as in ``__main__``) works.
    """

    def __init__(self, n_class):
        super().__init__()
        base_model = models.vgg16(pretrained=False)
        self.layers = list(base_model.children())
        layers=self.layers
        self.layer1 = nn.Sequential(*layers[:1])  # size=(N, 64, x.H/2, x.W/2)
        # self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
        # self.layer2 = layers[5]  # size=(N, 128, x.H/4, x.W/4)
        # self.upsample2 = nn.Upsample(scale_factor=8, mode='bilinear')
        # self.layer3 = layers[6]  # size=(N, 256, x.H/8, x.W/8)
        # self.upsample3 = nn.Upsample(scale_factor=16, mode='bilinear')
        # self.layer4 = layers[7]  # size=(N, 512, x.H/16, x.W/16)
        # self.upsample4 = nn.Upsample(scale_factor=32, mode='bilinear')
        #
        # self.conv1k = nn.Conv2d(64 + 128 + 256 + 512, n_class, 1)
        # self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # NOTE(review): everything past layer1 references attributes that are
        # commented out above; this path is currently unusable.
        x = self.layer1(x)
        up1 = self.upsample1(x)
        x = self.layer2(x)
        up2 = self.upsample2(x)
        x = self.layer3(x)
        up3 = self.upsample3(x)
        x = self.layer4(x)
        up4 = self.upsample4(x)
        merge = torch.cat([up1, up2, up3, up4], dim=1)
        merge = self.conv1k(merge)
        #out = self.sigmoid(merge)
        out=merge
        return out
if __name__ == '__main__':
    # Smoke check: build the VGG16 wrapper and inspect its layer structure
    # (forward is not exercised here — see the NOTE on the class).
    net=VGG16(1)
    print(net.layers)
    print('layer1')
    print(net.layer1)
|
import torch
from . import common_functions as c_f
class ModuleWithRecords(torch.nn.Module):
    """Base module whose attributes can be registered as recordable.

    :param collect_stats: when False, attributes flagged as statistics
        (``is_stat=True``) are not registered at all.
    """

    def __init__(self, collect_stats=True):
        super().__init__()
        self.collect_stats = collect_stats

    def add_to_recordable_attributes(
        self, name=None, list_of_names=None, is_stat=False
    ):
        """Register attribute name(s) on this module via the common utils.

        :param name: a single attribute name to register.
        :param list_of_names: several attribute names to register at once.
        :param is_stat: mark the attribute(s) as statistics; such attributes
            are skipped entirely when stat collection is disabled.
        """
        # Guard clause replaces the original inverted `if ... pass / else`.
        if is_stat and not self.collect_stats:
            return
        c_f.add_to_recordable_attributes(
            self, name=name, list_of_names=list_of_names, is_stat=is_stat
        )

    def reset_stats(self):
        """Reset all recorded statistics through the common utilities."""
        c_f.reset_stats(self)
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import asyncio
import functools
from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function
from azure.core.credentials import AccessToken
from .filetestcase import FileTestCase
LOGGING_FORMAT = '%(asctime)s %(name)-20s %(levelname)-5s %(message)s'
class AsyncFakeTokenCredential(object):
    """Protocol for classes able to provide OAuth tokens.

    :param str scopes: Lets you specify the type of access needed.
    """

    def __init__(self):
        # Canned token; expiry 0 marks it as immediately stale.
        self.token = AccessToken("YOU SHALL NOT PASS", 0)

    async def get_token(self, *args):
        """Return the canned token regardless of the requested scopes."""
        return self.token
class AsyncStorageTestCase(FileTestCase):
    # Adds async-aware helpers (sync test wrapper, credential factories) on
    # top of FileTestCase.
    @staticmethod
    def await_prepared_test(test_fn):
        """Synchronous wrapper for async test methods. Used to avoid making changes
        upstream to AbstractPreparer (which doesn't await the functions it wraps)
        """
        @functools.wraps(test_fn)
        def run(test_class_instance, *args, **kwargs):
            # Drop preparer-injected kwargs the test function doesn't accept.
            trim_kwargs_from_test_function(test_fn, kwargs)
            # NOTE(review): get_event_loop() is deprecated on Python 3.10+;
            # confirm the supported Python range before modernizing.
            loop = asyncio.get_event_loop()
            return loop.run_until_complete(test_fn(test_class_instance, **kwargs))
        return run
    def generate_oauth_token(self):
        # Live recordings use a real service-principal credential; playback
        # uses the fake credential below.
        if self.is_live:
            from azure.identity.aio import ClientSecretCredential
            return ClientSecretCredential(
                self.get_settings_value("TENANT_ID"),
                self.get_settings_value("CLIENT_ID"),
                self.get_settings_value("CLIENT_SECRET"),
            )
        return self.generate_fake_token()
    def generate_fake_token(self):
        # Credential stub usable without network access.
        return AsyncFakeTokenCredential()
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02a_data_anime_heads.ipynb (unless otherwise specified).
__all__ = ['Tokenizer', 'Datasets', 'DataLoaders']
# Cell
import pandas as pd
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import Dataset, DataLoader
from fastcore.all import *
# Internal Cell
def get_items(data_dir, pct=1, valid_pct=0.2):
    """Load (id, caption) rows from data_dir/'tags.csv' and split train/valid.

    pct keeps only the first fraction of rows; valid_pct==0 returns an empty
    validation frame instead of calling train_test_split.
    """
    items = pd.read_csv(data_dir/'tags.csv', header=None, names=['id', 'cap'])
    keep = int(len(items) * pct)
    items = items[:keep]
    if valid_pct == 0:
        return items, pd.DataFrame(data=None, columns=['id', 'cap'])
    # Stratify on the caption so tag distributions match across splits.
    return train_test_split(items, test_size=valid_pct, random_state=42,
                            shuffle=True, stratify=items.cap)
# Cell
class Tokenizer():
    """Fixed-vocabulary tokenizer mapping '<hair tag> <eye tag>' captions to
    two ids and back. Index 0 is the padding token."""

    def __init__(self):
        self.vocab = [
            '<pad>', 'orange hair', 'white hair', 'aqua hair', 'gray hair','green hair', 'red hair',
            'purple hair', 'pink hair','blue hair', 'black hair', 'brown hair', 'blonde hair', 'black eyes', 'orange eyes',
            'purple eyes', 'pink eyes', 'yellow eyes', 'aqua eyes', 'green eyes', 'brown eyes', 'red eyes', 'blue eyes',
        ]
        # Reverse lookup: tag string -> vocabulary index.
        self.o2i = {tag: idx for idx, tag in enumerate(self.vocab)}
        self.max_seq_len = 2
        self.vocab_sz = len(self.vocab)
        self.pad_id = 0

    def encode(self, cap):
        """'aqua hair aqua eyes' -> ([3, 18], 2): two tag ids plus length."""
        words = cap.split()
        hair_tag = ' '.join(words[:2])
        eye_tag = ' '.join(words[2:])
        return [self.o2i[hair_tag], self.o2i[eye_tag]], self.max_seq_len

    def decode(self, o):
        """[3, 18] -> 'aqua hair aqua eyes'."""
        return ' '.join(self.vocab[idx] for idx in o)
# Internal Cell
class AnimeHeadsDataset(Dataset):
    """Dataset yielding (tag ids, tag length, 64x64 RGB image) tensors."""

    def __init__(self, items, data_dir):
        "items: df of id and cap"
        self.data_dir = data_dir
        self.items = list(items.itertuples(index=False, name=None))
        self.tokenizer = Tokenizer()

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        return self.tfm(self.items[idx])

    def tfm(self, item):
        """Transform one (id, caption) row.

        item: (0, aqua hair aqua eyes);
        returns tag: (2,), tag_len: (), img64: (64, 64, 3).
        """
        img_id, caption = item
        tag_ids, tag_len = self.tokenizer.encode(caption)
        pixels = np.array(Image.open(self.data_dir/f'imgs/{img_id}.jpg'))
        if pixels.ndim == 2:
            # Grayscale image: replicate the single channel to RGB.
            pixels = np.repeat(pixels[..., None], 3, axis=2)
        return torch.tensor(tag_ids), torch.tensor(tag_len), torch.tensor(pixels)
# Cell
class Datasets():
    """Train/valid AnimeHeadsDataset pair built from a single data directory."""

    def __init__(self, data_dir, pct=1, valid_pct=0.2):
        train_df, valid_df = get_items(data_dir, pct=pct, valid_pct=valid_pct)
        self.train = AnimeHeadsDataset(train_df, data_dir)
        self.valid = AnimeHeadsDataset(valid_df, data_dir)
# Cell
class DataLoaders():
    """Train/valid DataLoader pair; training shuffles and drops the last
    incomplete batch, validation keeps order and all samples."""

    def __init__(self, dsets, bs=64):
        self.dsets = dsets
        self.train = DataLoader(dsets.train, batch_size=bs, shuffle=True,
                                num_workers=2, drop_last=True)
        self.valid = DataLoader(dsets.valid, batch_size=bs, shuffle=False,
                                num_workers=2)
|
from collections import OrderedDict
from .common import EWSAccountService, create_shape_element
from ..util import create_element, set_xml_value, TNS, MNS
from ..version import EXCHANGE_2010
class FindFolder(EWSAccountService):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/findfolder"""
    SERVICE_NAME = 'FindFolder'
    # XML container holding the returned Folder elements.
    element_container_name = '{%s}Folders' % TNS
    # XML container carrying paging info (IncludesLastItemInRange etc.).
    paging_container_name = '{%s}RootFolder' % MNS
    supports_paging = True
    def call(self, folders, additional_fields, restriction, shape, depth, max_items, offset):
        """Find subfolders of a folder.
        :param folders: the folders to act on
        :param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
        :param restriction: Restriction object that defines the filters for the query
        :param shape: The set of attributes to return
        :param depth: How deep in the folder structure to search for folders
        :param max_items: The maximum number of items to return
        :param offset: the offset relative to the first item in the item collection. Usually 0.
        :return: XML elements for the matching folders
        """
        # Local import to avoid a circular dependency between services and folders.
        from ..folders import Folder
        # All requested folders must share one root so results can be attached
        # to a single hierarchy.
        roots = {f.root for f in folders}
        if len(roots) != 1:
            raise ValueError('FindFolder must be called with folders in the same root hierarchy (%r)' % roots)
        root = roots.pop()
        for elem in self._paged_call(
                payload_func=self.get_payload,
                max_items=max_items,
                expected_message_count=len(folders),
                **dict(
                    folders=folders,
                    additional_fields=additional_fields,
                    restriction=restriction,
                    shape=shape,
                    depth=depth,
                    page_size=self.chunk_size,
                    offset=offset,
                )
        ):
            # Exceptions from individual response messages are passed through
            # for the caller to handle.
            if isinstance(elem, Exception):
                yield elem
                continue
            yield Folder.from_xml_with_root(elem=elem, root=root)
    def get_payload(self, folders, additional_fields, restriction, shape, depth, page_size, offset=0):
        """Build the FindFolder request body. Child elements are appended in
        schema order (FolderShape, paging view, Restriction, ParentFolderIds),
        which the EWS endpoint requires."""
        findfolder = create_element('m:%s' % self.SERVICE_NAME, attrs=dict(Traversal=depth))
        foldershape = create_shape_element(
            tag='m:FolderShape', shape=shape, additional_fields=additional_fields, version=self.account.version
        )
        findfolder.append(foldershape)
        # Indexed paging is only available from Exchange 2010 onwards.
        if self.account.version.build >= EXCHANGE_2010:
            indexedpageviewitem = create_element(
                'm:IndexedPageFolderView',
                attrs=OrderedDict([
                    ('MaxEntriesReturned', str(page_size)),
                    ('Offset', str(offset)),
                    ('BasePoint', 'Beginning'),
                ])
            )
            findfolder.append(indexedpageviewitem)
        else:
            if offset != 0:
                raise ValueError('Offsets are only supported from Exchange 2010')
        if restriction:
            findfolder.append(restriction.to_xml(version=self.account.version))
        parentfolderids = create_element('m:ParentFolderIds')
        set_xml_value(parentfolderids, folders, version=self.account.version)
        findfolder.append(parentfolderids)
        return findfolder
|
from deuce.drivers.blockstoragedriver import BlockStorageDriver
from deuce.drivers.metadatadriver \
import MetadataStorageDriver, GapError, OverlapError # noqa
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
import os
import time
import math
from model import Model_S2VT
from data_generator import Data_Generator
from inference_util import Inference
import configuration
import inception_base
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("eval_interval_secs", 600,
"Interval between evaluation runs.")
tf.flags.DEFINE_integer("min_global_step", 1000,
"Minimum global step to run evaluation.")
tf.flags.DEFINE_integer("batch_size", 500,
"Number of batches to evaluate at once.")
tf.flags.DEFINE_boolean("eval_all_models",False,
"Whether to evaluate all models in checkpoint_dir")
tf.logging.set_verbosity(tf.logging.INFO)
def evaluate_model(sess,model,summary_writer,data_gen):
    """Run one pass over the validation set; log loss and perplexity summaries.

    Args:
        sess: TF session with the checkpoint already restored.
        model: built model exposing caption_input/caption_mask/rnn_input
            placeholders plus batch_loss and global_step tensors.
        summary_writer: writer used to emit TensorBoard summaries.
        data_gen: data generator providing batched "val" data.
    """
    data_gen.init_batch(FLAGS.batch_size,"val")
    total_loss = 0
    global_step = sess.run(model.global_step)
    start_time = time.time()
    for i in range(data_gen.iter_per_epoch["val"]):
        dataset = data_gen.get_next_batch("val")
        feed_dict={}
        feed_dict[model.caption_input] = dataset["indexed_caption"]
        feed_dict[model.caption_mask] = dataset["caption_mask"]
        feed_dict[model.rnn_input] = dataset["video"]
        batch_loss = sess.run(model.batch_loss,feed_dict=feed_dict)
        total_loss += batch_loss
        # Log every other batch (even i) to keep output manageable.
        if not i % 2:
            tf.logging.info("Computed losses for %d of %d batches : %.2f", i + 1,
                            data_gen.iter_per_epoch["val"],batch_loss)
    eval_time = time.time() - start_time
    # Summary for the mean per-batch validation loss.
    loss_summary = tf.Summary()
    value = loss_summary.value.add()
    value.simple_value = total_loss/data_gen.iter_per_epoch["val"]
    value.tag = "loss/Batch_Loss"
    summary_writer.add_summary(loss_summary,global_step)
    # Perplexity = exp(mean loss), assuming batch_loss is a cross-entropy.
    perplexity = math.exp(total_loss/data_gen.iter_per_epoch["val"])
    tf.logging.info("Perplexity = %f (%.2g sec)", perplexity, eval_time)
    perp_summary = tf.Summary()
    value = perp_summary.value.add()
    value.simple_value = perplexity
    value.tag = "Perplexity"
    summary_writer.add_summary(perp_summary,global_step)
    summary_writer.flush()
    tf.logging.info("Finished processing evaluation at global step %d.",
                    global_step)
def run_once(model, saver, summary_writer, data_gen):
    """Evaluates the latest model checkpoint.
    """
    if FLAGS.eval_all_models:
        # The caller iterates over checkpoints and sets checkpoint_file per model.
        model_path = FLAGS.checkpoint_file
    else:
        model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
        if not model_path:
            tf.logging.info("Skipping evaluation. No checkpoint found in: %s",
                            FLAGS.checkpoint_dir)
            return
    with tf.Session() as sess:
        # Load model from checkpoint.
        tf.logging.info("Loading model from checkpoint: %s", model_path)
        saver.restore(sess, model_path)
        global_step = sess.run(model.global_step)
        tf.logging.info("Successfully loaded %s at global step = %d.",
                        os.path.basename(model_path), global_step)
        # Too-early checkpoints are not worth evaluating.
        if global_step < FLAGS.min_global_step:
            tf.logging.info("Skipping evaluation. Global step = %d < %d", global_step,
                            FLAGS.min_global_step)
            return
        # Run evaluation on the latest checkpoint.
        evaluate_model(
            sess=sess,
            model=model,
            summary_writer=summary_writer,
            data_gen=data_gen)
def run():
    """Runs evaluation in a loop, and logs summaries to TensorBoard."""
    # Create the evaluation directory if it doesn't exist.
    data_config = configuration.DataConfig().config
    data_gen = Data_Generator(data_config["processed_video_dir"],
                              data_config["caption_file"],
                              data_config["unique_frequency_cutoff"],
                              data_config["max_caption_length"])
    data_gen.load_vocabulary(data_config["caption_data_dir"])
    data_gen.load_dataset(data_config["caption_data_dir"])
    # Override the flag so run_once() reads the configured checkpoint dir.
    FLAGS.checkpoint_dir = data_config["checkpoint_dir"]
    eval_dir = data_config["val_log_dir"]
    if not tf.gfile.IsDirectory(eval_dir):
        tf.logging.info("Creating eval directory: %s", eval_dir)
        tf.gfile.MakeDirs(eval_dir)
    g = tf.Graph()
    with g.as_default():
        # Build the model for evaluation.
        model_config = configuration.ModelConfig(data_gen).config
        model = Model_S2VT(**model_config)
        model.build()
        # Create the Saver to restore model Variables.
        saver = tf.train.Saver()
        # Create the summary operation and the summary writer.
        val_writer = tf.summary.FileWriter(data_config["val_log_dir"])
        g.finalize()
    if(FLAGS.eval_all_models):
        # One-shot mode: evaluate every "model*" checkpoint, in numeric order
        # (names are assumed to look like "model-<step>").
        model_names = list(set([n.split(".")[0] for n in os.listdir(data_config["checkpoint_dir"]) if "model" in n]))
        model_names.sort(key= lambda x: int(x[6:]) )
        for name in model_names:
            FLAGS.checkpoint_file = os.path.join(data_config["checkpoint_dir"],name)
            tf.logging.info("Starting evaluation of %s at " %(name) + time.strftime(
                "%Y-%m-%d-%H:%M:%S", time.localtime()))
            run_once(model, saver, val_writer,data_gen)
    else:
        # Run a new evaluation run every eval_interval_secs.
        while True:
            start = time.time()
            tf.logging.info("Starting evaluation at " + time.strftime(
                "%Y-%m-%d-%H:%M:%S", time.localtime()))
            run_once(model, saver, val_writer,data_gen)
            # Sleep only for the remainder of the interval.
            time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
            if time_to_next_eval > 0:
                time.sleep(time_to_next_eval)
def main(unused_argv):
    """Entry point for tf.app.run(); remaining argv is ignored."""
    run()
if __name__ == "__main__":
    tf.app.run()
|
from typing import Dict, Sequence, List
import pyexlatex as pl
from datacode.models.variables import Variable
def model_eqs(structural_dict: Dict[Variable, Sequence[Variable]],
              measurement_dict: Dict[Variable, Sequence[Variable]],
              var_corr_groups: Sequence[Sequence[Variable]],
              **eq_kwargs) -> List[pl.Equation]:
    """Build pyexlatex equations for structural specs, measurement specs and
    correlation groups, dropping degenerate (None) equations."""
    equations = []
    # Structural and measurement specs share the y = x1 + x2 + ... shape.
    for spec in (structural_dict, measurement_dict):
        for lhs_var, rhs_vars in spec.items():
            equations.append(_vars_to_eq([lhs_var, *rhs_vars], operator='=', **eq_kwargs))
    # Correlation groups use the '~~' (covariance) operator.
    for group in var_corr_groups:
        equations.append(_vars_to_eq(group, operator='~~', **eq_kwargs))
    return [eq for eq in equations if eq]
def _vars_to_eq(var_seq: Sequence[Variable], operator: str = '=', **eq_kwargs):
if len(var_seq) < 2 or (len(var_seq) == 2 and var_seq[0] == var_seq[1]):
return None
lhs = var_seq[0].symbol
rhs = ' + '.join([str(var.symbol) for var in var_seq[1:]])
if operator == '=':
eq_str = f'{lhs} = {rhs}'
elif operator == '~~':
eq_str = rf'{lhs} \sim {rhs}'
else:
raise NotImplementedError(f'operator {operator} not supported')
return pl.Equation(str_eq=eq_str, **eq_kwargs)
|
from abc import ABC, abstractmethod
import geopandas as gpd
import graph_tool
import graph_tool.draw
import graph_tool.topology
import numpy as np
from aves.features.geo import positions_to_array
from .base import Network
class LayoutStrategy(ABC):
    """Base class for node-layout algorithms over a Network.

    Subclasses implement layout() to populate ``self.node_positions``;
    layout_nodes() then derives the vector/dict views and calls the
    optional _post_layout() hook.
    """
    def __init__(self, network: Network, name: str):
        self.network = network
        self.name = name
        # Raw positions as produced by the concrete layout (e.g. a
        # graph_tool vertex property map or an array); set by layout().
        self.node_positions = None
        # Derived views, populated by layout_nodes().
        self.node_positions_dict: dict = None
        self.node_positions_vector: np.ndarray = None
    @abstractmethod
    def layout(self):
        # Must assign self.node_positions.
        pass
    def _post_layout(self):
        # Hook for subclasses to derive extra per-node attributes.
        pass
    def layout_nodes(self, *args, **kwargs):
        """Run the layout, then build per-node position views keyed by the
        integer vertex id."""
        self.layout(*args, **kwargs)
        self.node_positions_vector = np.array(list(self.node_positions))
        self.node_positions_dict = dict(
            zip(
                list(map(int, self.network.vertices())),
                list(self.node_positions_vector),
            )
        )
        self._post_layout()
        return self.node_positions
    def get_position(self, idx):
        """Return the position of vertex idx (requires layout_nodes() first)."""
        idx = int(idx)
        return self.node_positions_dict[idx]
    def get_angle(self, idx):
        raise NotImplementedError("this class doesn't work with angles")
    def get_ratio(self, idx):
        raise NotImplementedError("this class doesn't work with ratios")
    def positions(self):
        # All node positions as one array, in vertex order.
        return self.node_positions_vector
class ForceDirectedLayout(LayoutStrategy):
    """Force-directed layout via graph_tool's sfdp or arf algorithms."""

    def __init__(self, network: Network):
        super().__init__(network, "force-directed")

    def layout(self, *args, **kwargs):
        """Compute node positions.

        Keyword Args:
            algorithm: "sfdp" (default) or "arf".
            verbose: forwarded to sfdp_layout (default False).
            Any remaining kwargs are forwarded to sfdp_layout.

        Raises:
            ValueError: if the algorithm name is unsupported.
        """
        method = kwargs.pop("algorithm", "sfdp")
        # Idiom fix: `method not in ...` instead of `not method in ...`.
        if method not in ("sfdp", "arf"):
            raise ValueError(f"unsupported method: {method}")
        if method == "sfdp":
            self.node_positions = graph_tool.draw.sfdp_layout(
                self.network.graph(),
                eweight=self.network.edge_weight,
                verbose=kwargs.pop("verbose", False),
                **kwargs,
            )
        else:
            # arf_layout takes no extra options here.
            self.node_positions = graph_tool.draw.arf_layout(self.network.graph())
class RadialLayout(LayoutStrategy):
    """Radial tree layout; derives per-node angles and radii after layout."""

    def __init__(self, network: Network):
        super().__init__(network, "radial")
        self.node_angles = None
        self.node_angles_dict = None
        # Fixed attribute-name typo (was node_ratio); get_ratio reads node_ratios.
        self.node_ratios = None

    def layout(self, *args, **kwargs):
        root_node = kwargs.get("root", 0)
        self.node_positions = graph_tool.draw.radial_tree_layout(
            self.network.graph(), root_node
        )

    def _post_layout(self):
        # node_positions_vector is the (N, 2) x/y array built by
        # layout_nodes() before this hook runs.
        xy = self.node_positions_vector
        # BUG FIX: angles were computed as arctan2(pos, pos), which is a
        # constant 45 degrees; use arctan2(y, x) per node instead.
        self.node_angles = np.degrees(np.arctan2(xy[:, 1], xy[:, 0]))
        # BUG FIX: the dict was zipped against node_angles_dict.keys(),
        # which is still None here; key by the vertex ids instead.
        self.node_angles_dict = dict(
            zip(self.node_positions_dict.keys(), self.node_angles)
        )
        # Per-node distance from the origin (the tree root).
        self.node_ratios = np.sqrt((xy * xy).sum(axis=1))

    def get_angle(self, idx):
        return self.node_angles_dict[int(idx)]

    def get_ratio(self, idx):
        return self.node_ratios[int(idx)]
class PrecomputedLayout(LayoutStrategy):
    """Layout that copies externally computed positions (plus optional
    polar angle/ratio attributes) into the network."""

    def __init__(self, network: Network):
        super().__init__(network, "precomputed")

    def layout(self, *args, **kwargs):
        coords = np.array(kwargs.get("positions"))
        if coords.shape[0] != self.network.num_vertices():
            raise ValueError("dimensions do not match")
        prop = self.network.graph().new_vertex_property("vector<double>")
        for vertex, coord in zip(self.network.vertices(), coords):
            prop[vertex] = coord
        self.node_positions = prop
        angles = kwargs.get("angles", None)
        ratios = kwargs.get("ratios", None)
        if angles is None and ratios is None:
            # Polar attributes are optional; nothing more to do.
            return
        if angles is None or ratios is None:
            raise ValueError("angles and ratios need to be provided simultaneously")
        self.node_ratios = ratios
        self.node_angles = angles

    def get_angle(self, idx):
        return self.node_angles[int(idx)]

    def get_ratio(self, idx):
        return self.node_ratios[int(idx)]
class GeographicalLayout(LayoutStrategy):
    """Layout that places each node at the centroid of its matching geometry."""

    def __init__(
        self, network: Network, geodataframe: gpd.GeoDataFrame, node_column: str = None
    ):
        super().__init__(network, name="geographical")
        self.node_column = node_column
        if len(self.network.node_map) > len(geodataframe):
            raise ValueError(f"GeoDataFrame has missing vertices")
        if self.node_column is None:
            # Index-based match: keep rows whose index is a known node id.
            self.geodf = geodataframe.loc[self.network.node_map.keys()].sort_index()
        else:
            # Column-based match: filter rows by node id, then sort by it.
            mask = geodataframe[node_column].isin(self.network.node_map.keys())
            self.geodf = geodataframe[mask].sort_values(node_column)
        if len(self.network.node_map) != len(self.geodf):
            raise ValueError(
                f"Incompatible shapes: {len(self.network.node_map)} nodes and {len(self.geodf)} shapes. Do you have duplicate rows?"
            )

    def layout(self, *args, **kwargs):
        centroids = positions_to_array(self.geodf.geometry.centroid)
        if len(centroids) != len(self.network.node_map):
            raise ValueError(
                f"GeoDataFrame and Network have different lengths after filtering nodes. Maybe there are repeated values in the node column/index."
            )
        self.node_positions = centroids
|
# -*- coding: utf-8 -*-
from argh.decorators import arg
import lain_sdk.mydocker as docker
from lain_cli.auth import SSOAccess
from lain_cli.utils import check_phase, get_domain
from lain_sdk.util import info, warn
@arg('phase', help="lain cluster phase id, can be added by lain config save")
def logout(phase):
    """
    Logout specific phase
    """
    check_phase(phase)
    domain = get_domain(phase)
    # Clear the SSO token first; only drop the docker registry login if
    # that succeeded.
    if SSOAccess.clear_token(phase):
        docker.logout('registry.%s' % domain)
        info("Logout successfully!")
    else:
        warn('Logout failed!')
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/mrp/protobuf/RegisterHIDDeviceResultMessage.proto
import sys
# Py2/Py3 shim: leave the serialized descriptor as str on Py2, encode to
# latin1 bytes on Py3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
# Generated file descriptor; the serialized_pb blob is the compiled .proto
# and must not be edited by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='pyatv/mrp/protobuf/RegisterHIDDeviceResultMessage.proto',
  package='',
  syntax='proto2',
  serialized_options=None,
  serialized_pb=_b('\n7pyatv/mrp/protobuf/RegisterHIDDeviceResultMessage.proto\x1a(pyatv/mrp/protobuf/ProtocolMessage.proto\"M\n\x1eRegisterHIDDeviceResultMessage\x12\x11\n\terrorCode\x18\x01 \x01(\x05\x12\x18\n\x10\x64\x65viceIdentifier\x18\x02 \x01(\x05:Y\n\x1eregisterHIDDeviceResultMessage\x12\x10.ProtocolMessage\x18\x0c \x01(\x0b\x32\x1f.RegisterHIDDeviceResultMessage')
  ,
  dependencies=[pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.DESCRIPTOR,])
REGISTERHIDDEVICERESULTMESSAGE_FIELD_NUMBER = 12
# Extension descriptor: attaches this message to ProtocolMessage (field 12).
registerHIDDeviceResultMessage = _descriptor.FieldDescriptor(
  name='registerHIDDeviceResultMessage', full_name='registerHIDDeviceResultMessage', index=0,
  number=12, type=11, cpp_type=10, label=1,
  has_default_value=False, default_value=None,
  message_type=None, enum_type=None, containing_type=None,
  is_extension=True, extension_scope=None,
  serialized_options=None, file=DESCRIPTOR)
_REGISTERHIDDEVICERESULTMESSAGE = _descriptor.Descriptor(
  name='RegisterHIDDeviceResultMessage',
  full_name='RegisterHIDDeviceResultMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='errorCode', full_name='RegisterHIDDeviceResultMessage.errorCode', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='deviceIdentifier', full_name='RegisterHIDDeviceResultMessage.deviceIdentifier', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=101,
  serialized_end=178,
)
DESCRIPTOR.message_types_by_name['RegisterHIDDeviceResultMessage'] = _REGISTERHIDDEVICERESULTMESSAGE
DESCRIPTOR.extensions_by_name['registerHIDDeviceResultMessage'] = registerHIDDeviceResultMessage
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class generated from the descriptor above.
RegisterHIDDeviceResultMessage = _reflection.GeneratedProtocolMessageType('RegisterHIDDeviceResultMessage', (_message.Message,), {
  'DESCRIPTOR' : _REGISTERHIDDEVICERESULTMESSAGE,
  '__module__' : 'pyatv.mrp.protobuf.RegisterHIDDeviceResultMessage_pb2'
  # @@protoc_insertion_point(class_scope:RegisterHIDDeviceResultMessage)
  })
_sym_db.RegisterMessage(RegisterHIDDeviceResultMessage)
registerHIDDeviceResultMessage.message_type = _REGISTERHIDDEVICERESULTMESSAGE
pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(registerHIDDeviceResultMessage)
# @@protoc_insertion_point(module_scope)
|
#!/usr/bin/env python
"""insertionsort.py: Program to implement insertion sort"""
__author__ = 'Rohit Sinha'
def insertion_sort(alist):
    """Sort alist in place, ascending, using insertion sort (stable, O(n^2))."""
    for i, key in enumerate(alist[1:], start=1):
        # Shift larger elements one slot right until key's position is found.
        j = i
        while j > 0 and alist[j - 1] > key:
            alist[j] = alist[j - 1]
            j -= 1
        alist[j] = key
if __name__ == '__main__':
    # Demo: sort a sample list in place and print the result.
    alist = [84, 69, 76, 86, 94, 91]
    insertion_sort(alist)
    print(alist)
|
#!/usr/bin/env python3
#
# Script to test LUPFactors_simple, LUPFactors and LUPPFactors on a variety
# of ill-conditioned matrices.
#
# Daniel R. Reynolds
# SMU Mathematics
# Math 4315
# imports
import numpy
import time
from LUPFactors_simple import LUPFactors_simple
from LUPFactors import LUPFactors
from LUPPFactors import LUPPFactors
# set matrix sizes for tests
nvals = [20, 40, 80, 160]
# loop over matrix sizes
for n in nvals:
    print("Testing stabilization approaches for linear system of dimension ", n)
    # create the matrix: a perturbed Vandermonde matrix (already
    # ill-conditioned), then badly rescale random rows and columns to
    # stress the pivoting strategies
    A = numpy.vander(numpy.linspace(0.1,1,n)) + 0.0001*numpy.random.rand(n,n)
    n2 = n//2
    # NOTE(review): randint may repeat indices, so fewer than n/2 distinct
    # rows/columns can end up rescaled -- presumably acceptable here
    randrows = numpy.random.randint(0,n-1,n2)
    randcols = numpy.random.randint(0,n-1,n2)
    A[randrows,:] = numpy.diag(1000*numpy.random.rand(n2))@A[randrows,:]
    A[:,randcols] = A[:,randcols]@numpy.diag(1000*numpy.random.rand(n2))
    # test LUPFactors_simple
    print("  LUPFactors_simple:")
    ts = time.time()
    L, U, P = LUPFactors_simple(A)
    print("    runtime = ", time.time()-ts)
    # residual of the factorization A = P^T L U
    print("    norm(A - P^T L U) = ", numpy.linalg.norm(A - P.T@L@U))
    # test LUPFactors
    print("  LUPFactors:")
    ts = time.time()
    L, U, P = LUPFactors(A)
    print("    runtime = ", time.time()-ts)
    print("    norm(A - P^T L U) = ", numpy.linalg.norm(A - P.T@L@U))
    # test LUPPFactors (full pivoting: row permutation P1, column permutation P2)
    print("  LUPPFactors:")
    ts = time.time()
    L, U, P1, P2 = LUPPFactors(A)
    print("    runtime = ", time.time()-ts)
    print("    norm(A - P1^T L U P2^T) = ", numpy.linalg.norm(A - P1.T@L@U@P2.T))
# end of script
|
#!/usr/bin/env python3
#https://codeforces.com/problemset/problem/1140/D
# (restored from mis-encoded Chinese comments)
# Formula for the sum of n**2 => https://zhuanlan.zhihu.com/p/26351880
n = int(input())
# Closed-form answer: (n-1)*n*(n+1)/3 - 2, computed with integer arithmetic.
print((n - 1) * n * (n + 1) // 3 - 2)
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import TestCase, mock
from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook
from tests.gcp.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
API_VERSION = "v1"
GCP_CONN_ID = "google_cloud_default"
class TestGoogleDisplayVideo360Hook(TestCase):
    """Unit tests for GoogleDisplayVideo360Hook's connection and query
    CRUD wrappers, with the Google API client fully mocked out."""
    def setUp(self):
        # Patch the base hook __init__ so no real GCP credentials are needed.
        with mock.patch(
            "airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.hook = GoogleDisplayVideo360Hook(gcp_conn_id=GCP_CONN_ID)
    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook._authorize"
    )
    @mock.patch("airflow.providers.google.marketing_platform.hooks."
                "display_video.build")
    def test_gen_conn(self, mock_build, mock_authorize):
        # get_conn() should build a doubleclickbidmanager client exactly once.
        result = self.hook.get_conn()
        mock_build.assert_called_once_with(
            "doubleclickbidmanager",
            API_VERSION,
            http=mock_authorize.return_value,
            cache_discovery=False,
        )
        self.assertEqual(mock_build.return_value, result)
    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_create_query(self, get_conn_mock):
        # create_query should forward the body and return the API response.
        body = {"body": "test"}
        return_value = "TEST"
        get_conn_mock.return_value.queries.return_value.createquery.return_value.execute.return_value = (
            return_value
        )
        result = self.hook.create_query(query=body)
        get_conn_mock.return_value.queries.return_value.createquery.assert_called_once_with(
            body=body
        )
        self.assertEqual(return_value, result)
    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_delete_query(self, get_conn_mock):
        # delete_query should pass the query id through to the API.
        query_id = "QUERY_ID"
        return_value = "TEST"
        get_conn_mock.return_value.queries.return_value.deletequery.return_value.execute.return_value = (
            return_value
        )
        self.hook.delete_query(query_id=query_id)
        get_conn_mock.return_value.queries.return_value.deletequery.assert_called_once_with(
            queryId=query_id
        )
    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_get_query(self, get_conn_mock):
        # get_query should fetch by id and return the API response.
        query_id = "QUERY_ID"
        return_value = "TEST"
        get_conn_mock.return_value.queries.return_value.getquery.return_value.execute.return_value = (
            return_value
        )
        result = self.hook.get_query(query_id=query_id)
        get_conn_mock.return_value.queries.return_value.getquery.assert_called_once_with(
            queryId=query_id
        )
        self.assertEqual(return_value, result)
    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_list_queries(self, get_conn_mock):
        # list_queries should unwrap the "queries" key from the response.
        queries = ["test"]
        return_value = {"queries": queries}
        get_conn_mock.return_value.queries.return_value.listqueries.return_value.execute.return_value = (
            return_value
        )
        result = self.hook.list_queries()
        get_conn_mock.return_value.queries.return_value.listqueries.assert_called_once_with()
        self.assertEqual(queries, result)
    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_run_query(self, get_conn_mock):
        # run_query should submit the id plus params as the request body.
        query_id = "QUERY_ID"
        params = {"params": "test"}
        self.hook.run_query(query_id=query_id, params=params)
        get_conn_mock.return_value.queries.return_value.runquery.assert_called_once_with(
            queryId=query_id, body=params
        )
|
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
This module contains the methods for constructing the material balances for
zero-order single-input/single-output (SISO) unit models (i.e. units with a single inlet and single
outlet where composition changes, such as a generic bioreactor).
"""
import idaes.logger as idaeslog
from idaes.core.solvers import get_solver
import idaes.core.util.scaling as iscale
from idaes.core.util.exceptions import InitializationError
from pyomo.environ import (
check_optimal_termination,
NonNegativeReals,
Var,
units as pyunits,
)
# Some more information about this module
__author__ = "Adam Atia"
# Set up logger
_log = idaeslog.getLogger(__name__)
def build_siso(self):
    """
    Helper method for constructing material balances for zero-order type models
    with SISO behavior.
    Two StateBlocks are added with two corresponding Ports:
        * properties_in --> inlet
        * properties_treated ---> treated
    Two additional variables are added:
        * recovery_frac_mass_H2O (indexed by time)
        * removal_frac_mass_solute (indexed by time and solute)
    Two additional constraints are added to represent the material balances
        * water_recovery_equation (indexed by time)
        * solute_treated_equation (indexed by time and solute)
    This method also sets private attributes on the unit model with references
    to the appropriate initialization and scaling methods to use and to return
    the inlet volumetric flow rate.
    """
    self._has_recovery_removal = True
    # Wire the SISO-specific initialization/scaling routines into the
    # zero-order framework hooks.
    self._initialize = initialize_siso
    self._scaling = calculate_scaling_factors_siso
    # Create state blocks for inlet and outlets
    tmp_dict = dict(**self.config.property_package_args)
    tmp_dict["has_phase_equilibrium"] = False
    tmp_dict["defined_state"] = True
    self.properties_in = self.config.property_package.build_state_block(
        self.flowsheet().time, doc="Material properties at inlet", default=tmp_dict
    )
    # The treated stream state is not a defined (fixed) state.
    tmp_dict_2 = dict(**tmp_dict)
    tmp_dict_2["defined_state"] = False
    self.properties_treated = self.config.property_package.build_state_block(
        self.flowsheet().time,
        doc="Material properties of treated water",
        default=tmp_dict_2,
    )
    # Create Ports
    self.add_port("inlet", self.properties_in, doc="Inlet port")
    self.add_port("treated", self.properties_treated, doc="Treated water outlet port")
    # Add performance variables
    self.recovery_frac_mass_H2O = Var(
        self.flowsheet().time,
        initialize=0.8,
        domain=NonNegativeReals,
        units=pyunits.dimensionless,
        # Lower bound keeps the variable strictly positive for scaling;
        # upper bound slightly above 1 gives the solver numerical slack.
        bounds=(1e-8, 1.0000001),
        doc="Mass recovery fraction of water in the treated stream",
    )
    self.removal_frac_mass_solute = Var(
        self.flowsheet().time,
        self.config.property_package.solute_set,
        domain=NonNegativeReals,
        initialize=0.01,
        units=pyunits.dimensionless,
        doc="Solute removal fraction on a mass basis",
    )
    # Add performance constraints
    # Water recovery
    @self.Constraint(self.flowsheet().time, doc="Water recovery equation")
    def water_recovery_equation(b, t):
        return (
            b.recovery_frac_mass_H2O[t] * b.properties_in[t].flow_mass_comp["H2O"]
            == b.properties_treated[t].flow_mass_comp["H2O"]
        )
    # Solute concentration of treated stream
    @self.Constraint(
        self.flowsheet().time,
        self.config.property_package.solute_set,
        doc="Constraint for solute concentration in treated " "stream.",
    )
    def solute_treated_equation(b, t, j):
        return (1 - b.removal_frac_mass_solute[t, j]) * b.properties_in[
            t
        ].flow_mass_comp[j] == b.properties_treated[t].flow_mass_comp[j]
    # Reporting hooks used by the zero-order base class.
    self._stream_table_dict = {"Inlet": self.inlet, "Treated": self.treated}
    self._perf_var_dict["Solute Removal"] = self.removal_frac_mass_solute
    self._get_Q = _get_Q_siso
def initialize_siso(
    blk, state_args=None, outlvl=idaeslog.NOTSET, solver=None, optarg=None
):
    """
    Initialization routine for single inlet-single outlet (SISO) unit models.

    Initializes the inlet and treated-stream state blocks, solves the full
    unit model, and finally releases the held inlet state.

    Keyword Arguments:
        state_args : a dict of arguments to be passed to the property
                     package(s) to provide an initial state for
                     initialization (see documentation of the specific
                     property package) (default = {}).
        outlvl : sets output level of initialization routine
        optarg : solver options dictionary object (default=None, use
                 default solver options)
        solver : str indicating which solver to use during
                 initialization (default = None, use default IDAES solver)
    Returns:
        None

    Raises:
        InitializationError: if the final unit solve does not terminate
        optimally.
    """
    if optarg is None:
        optarg = {}
    # Set solver options and per-phase loggers
    init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="unit")
    solve_log = idaeslog.getSolveLogger(blk.name, outlvl, tag="unit")
    solver_obj = get_solver(solver, optarg)
    # Get initial guesses for inlet if none provided: copy the current
    # values of every inlet port member (indexed or scalar).
    if state_args is None:
        state_args = {}
        state_dict = blk.properties_in[
            blk.flowsheet().time.first()
        ].define_port_members()
        for k in state_dict.keys():
            if state_dict[k].is_indexed():
                state_args[k] = {}
                for m in state_dict[k].keys():
                    state_args[k][m] = state_dict[k][m].value
            else:
                state_args[k] = state_dict[k].value
    # ---------------------------------------------------------------------
    # Initialize state blocks. The inlet state is held fixed (hold_state=True)
    # until the whole-unit solve below has completed.
    flags = blk.properties_in.initialize(
        outlvl=outlvl,
        optarg=optarg,
        solver=solver,
        state_args=state_args,
        hold_state=True,
    )
    blk.properties_treated.initialize(
        outlvl=outlvl,
        optarg=optarg,
        solver=solver,
        state_args=state_args,
        hold_state=False,
    )
    init_log.info_high("Initialization Step 1 Complete.")
    # ---------------------------------------------------------------------
    # Solve the full unit model
    with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:
        results = solver_obj.solve(blk, tee=slc.tee)
    init_log.info_high("Initialization Step 2 {}.".format(idaeslog.condition(results)))
    # ---------------------------------------------------------------------
    # Release Inlet state
    blk.properties_in.release_state(flags, outlvl)
    init_log.info("Initialization Complete: {}".format(idaeslog.condition(results)))
    if not check_optimal_termination(results):
        raise InitializationError(
            f"{blk.name} failed to initialize successfully. Please check "
            f"the output logs for more information."
        )
def calculate_scaling_factors_siso(self):
    """Scale the SISO performance constraints using the scaling factors of
    the corresponding inlet mass flows."""
    # Water recovery: scale by the inlet H2O mass-flow scaling factor.
    for t, con in self.water_recovery_equation.items():
        sf = iscale.get_scaling_factor(
            self.properties_in[t].flow_mass_comp["H2O"],
            default=1,
            warning=True,
            hint=" for water recovery",
        )
        iscale.constraint_scaling_transform(con, sf)
    # Solute removal: scale each constraint by the matching inlet solute
    # mass flow. Warnings are suppressed here since a missing factor would
    # already have been reported for the same state block above.
    for (t, j), con in self.solute_treated_equation.items():
        sf = iscale.get_scaling_factor(
            self.properties_in[t].flow_mass_comp[j], default=1, warning=False
        )
        iscale.constraint_scaling_transform(con, sf)
def _get_Q_siso(self, t):
    """Return the inlet volumetric flow rate at time ``t`` (bound to SISO
    unit models as ``self._get_Q``)."""
    return self.properties_in[t].flow_vol
|
from os import path, remove, mkdir, getcwd
import sys
from shutil import copytree, rmtree, copy as shcopy
import PyInstaller.__main__
from py7zr import SevenZipFile
from main.code.engine.constants import cprint, clear_terminal, colorize
import re
# Build instructions
def get_version() -> str:
    """Prompt for and validate a version string.

    Accepted format: ``v<major>.<minor>.<patch>`` with an optional
    ``-alpha`` or ``-beta`` suffix, e.g. ``v1.2.3`` or ``v0.10.1-beta``.
    Typing ``exit`` aborts the program.

    Returns:
        The validated version string.

    Raises:
        ValueError: if the input does not match the expected format.
    """
    # Get user input of version
    version = input("version: ")
    if version == "exit":
        sys.exit()
    # Bug fix: the previous pattern "(v#.#.#)+(-alpha|-beta)?" repeated the
    # whole version group, so strings like "v1.2.3v4.5.6" validated.
    # Also allow multi-digit components (backward compatible).
    reg = r"v[0-9]+\.[0-9]+\.[0-9]+(-alpha|-beta)?"
    if re.fullmatch(reg, version):
        return version
    raise ValueError("Improper version string")
def main():
    """Build and package the game for the current platform.

    Steps: clean dist/<version>/<platform>, build a one-file PyInstaller
    executable, bundle it with the assets directory, optionally compress
    the bundle to .7z, then remove PyInstaller's temporary files.

    Raises:
        RuntimeError: when run on a platform other than Windows or Linux.
    """
    # Clear the terminal
    clear_terminal()
    # Detect the build platform; only Windows and Linux are supported.
    osname = sys.platform
    if osname.startswith("win"):
        os_platform = "windows"
    elif osname.startswith("linux"):
        os_platform = "linux"
    else:
        msg = "unable to build for systems other than win & linux"
        msg = colorize(msg, "red")
        raise RuntimeError(msg)
    # Get directories and version
    version = get_version()
    dir_main = getcwd()
    dir_dist = path.join(dir_main, "dist")
    # Clean directory: dist/<version>/<platform> is rebuilt from scratch.
    dir_version = path.join(dir_dist, version)
    if not path.exists(dir_version):
        mkdir(dir_version)
    dir_os = path.join(dir_version, os_platform)
    if path.exists(dir_os):
        rmtree(dir_os)
    mkdir(dir_os)
    cprint("~~Directory Cleaned~~\n", "green")
    # Create a single-file executable with PyInstaller
    executable_name = "game-x_" + version + "-" + os_platform
    print("Creating executable...")
    arguments = [
        "game.py",
        "--onefile",
        "--noconsole",
        "--debug=all",
        "-n" + executable_name,
    ]
    PyInstaller.__main__.run(arguments)
    executable = path.join(dir_dist, executable_name)
    if os_platform == "windows":
        # PyInstaller appends .exe to Windows builds.
        executable += ".exe"
    cprint("~~EXECUTABLE CREATED~~\n", "green")
    # Create the game folder for this platform build
    dir_game = path.join(dir_os, executable_name)
    print("Creating game folder...")
    mkdir(dir_game)
    print("Moving assets directory...")
    dir_assets = path.join(dir_main, "assets")
    dir_new_assets = path.join(dir_game, "assets")
    copytree(dir_assets, dir_new_assets)
    print("Moving executable...")
    # Copy into the game folder, then delete the original from dist/.
    shcopy(executable, dir_game)
    remove(executable)
    cprint("~~GAME FOLDER CREATED~~\n", "green")
    # Optionally compress the folder to .7z
    if input("Zip file y/n?") == "y":
        print("Creating 7z file...")
        with SevenZipFile(dir_game + ".7z", "w") as archive:
            archive.writeall(dir_game, executable_name)
        cprint("~~GAME FOLDER COMPRESSED TO 7Z~~", "green")
    # Cleanup of PyInstaller temporary files
    print("Cleaning up temp files...")
    spec = path.join(dir_main, executable_name + ".spec")
    build = path.join(dir_main, "build", executable_name)
    # Remove spec files
    if path.exists(spec):
        msg = f"Removing: spec file - {path.basename(spec)}"
        cprint(msg, "yellow")
        remove(spec)
    # Remove build files
    if path.exists(build):
        msg = f"Removing: build folder - {path.basename(build)}"
        cprint(msg, "yellow")
        rmtree(build)
    # Finalize
    text = f"\n~~Version {version} {os_platform}"
    text += ": build created with no errors~~"
    cprint(text, "green")
# Run the build only when executed directly as a script.
if __name__ == "__main__":
    main()
|
import FWCore.ParameterSet.Config as cms

# CMSSW configuration: read back EDM-embedded monitoring elements and save
# them via the DQM file saver.
process = cms.Process("TestDQMFileSaver1")
process.load("DQMServices.Components.test.MessageLogger_cfi")
process.load("DQMServices.Components.EDMtoMEConverter_cff")
# Process every event in the input file (-1 = unlimited).
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:test_relval_generate.root')
)
process.p1 = cms.Path(process.EDMtoMEConverter*process.dqmSaver)
# NOTE(review): an empty naming convention is unusual — presumably this
# exercises the saver's handling of a blank convention; confirm intent.
process.dqmSaver.convention = ''
|
from librosa.core import load
from librosa.feature import mfcc
import numpy as np
def normalize_gain(samples: np.ndarray) -> np.ndarray:
    """Min-max normalize a sample array into the range [0, 1].

    Args:
        samples: Audio sample array (any shape).

    Returns:
        An array of the same shape, scaled so the minimum maps to 0 and
        the maximum maps to 1. A constant signal returns all zeros instead
        of dividing by zero (which previously produced NaN/inf values).
    """
    min_ = samples.min()
    max_ = samples.max()
    span = max_ - min_
    if span == 0:
        # Constant signal: nothing to scale; avoid division by zero.
        return np.zeros_like(samples, dtype=float)
    return (samples - min_) / span
def mfccs(filepath: str, frame_ms: int, sliding_ms: int, n_mfccs: int) -> np.ndarray:
    '''
    Given a filepath, computes the Mel Frequency Cepstrum Coefficients
    specified by n_mfccs using a frame width of frame_ms and a slide of
    sliding_ms
    '''
    # Load the audio file; sr is the sampling rate in samples per second.
    time_series, sr = load(filepath)
    # Normalize amplitude into [0, 1] before feature extraction.
    time_series = normalize_gain(time_series)
    # Samples per millisecond — converts ms windows into sample counts.
    sr_ms = sr / 1000
    # NOTE(review): librosa >= 0.10 requires the signal as keyword `y=`;
    # this positional call assumes an older librosa — confirm pinned version.
    return mfcc(time_series,
                sr=sr,
                n_mfcc=n_mfccs,
                n_fft=int(frame_ms*sr_ms),
                hop_length=int(sliding_ms*sr_ms))
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
    """Per-sample stochastic depth: drops whole residual paths at random.

    Delegates to timm's ``drop_path``, which is a no-op in eval mode.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f'p={self.drop_prob}'
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> Linear -> Dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.act(self.fc1(x))
        # Matching the original: dropout is applied only after fc2; the
        # usual post-activation dropout is deliberately omitted here.
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention with optional learned relative position bias.

    When ``window_size`` is given, a bias table is built for every relative
    offset between tokens in the window, plus three extra entries for
    cls->token, token->cls and cls->cls interactions.
    """

    def __init__(
            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
            proj_drop=0., window_size=None, attn_head_dim=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        # Default scaling is 1/sqrt(head_dim) unless overridden via qk_scale.
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=qkv_bias)
        if window_size:
            self.window_size = window_size
            # (2*Wh-1)*(2*Ww-1) in-window offsets + 3 cls-related entries.
            self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
            # cls to token & token 2 cls & cls to cls
            # get pair-wise relative position index for each token inside the window
            coords_h = torch.arange(window_size[0])
            coords_w = torch.arange(window_size[1])
            coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
            coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
            relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
            relative_coords[:, :, 1] += window_size[1] - 1
            relative_coords[:, :, 0] *= 2 * window_size[1] - 1
            # Index 0 in each axis is reserved for the cls token.
            relative_position_index = \
                torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
            relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
            relative_position_index[0, 0:] = self.num_relative_distance - 3
            relative_position_index[0:, 0] = self.num_relative_distance - 2
            relative_position_index[0, 0] = self.num_relative_distance - 1
            self.register_buffer("relative_position_index", relative_position_index)
        else:
            self.window_size = None
            self.relative_position_bias_table = None
            self.relative_position_index = None
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, rel_pos_bias=None):
        B, N, C = x.shape
        # NOTE(review): this reshape uses C // num_heads, which assumes
        # all_head_dim == C; a custom attn_head_dim would break it — confirm.
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))
        if self.relative_position_bias_table is not None:
            # Look up per-pair biases and reshape to (nH, tokens, tokens).
            relative_position_bias = \
                self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
                    self.window_size[0] * self.window_size[1] + 1,
                    self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
            relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
            attn = attn + relative_position_bias.unsqueeze(0)
        if rel_pos_bias is not None:
            # Shared bias supplied at the model level (RelativePositionBias).
            attn = attn + rel_pos_bias
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    """BEiT transformer block: pre-norm attention + MLP with residual
    connections, optional LayerScale (gamma) and stochastic depth."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 window_size=None, attn_head_dim=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        # Bug fix: `init_values > 0` raised TypeError when the default
        # init_values=None was used (None is not orderable in Python 3).
        if init_values is not None and init_values > 0:
            # LayerScale: learnable per-channel scaling of each residual branch.
            self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
        else:
            self.gamma_1, self.gamma_2 = None, None

    def forward(self, x, rel_pos_bias=None):
        """Apply attention and MLP sub-blocks, each with a residual path."""
        if self.gamma_1 is None:
            x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    Splits an image into non-overlapping patches and linearly embeds each
    one via a single strided convolution.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid_h = img_size[0] // patch_size[0]
        grid_w = img_size[1] // patch_size[1]
        self.patch_shape = (grid_h, grid_w)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = grid_h * grid_w
        # One conv kernel application per patch (kernel == stride == patch).
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x, **kwargs):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # (B, C, H, W) -> (B, embed_dim, gh, gw) -> (B, gh*gw, embed_dim)
        return self.proj(x).flatten(2).transpose(1, 2)
class RelativePositionBias(nn.Module):
    """Learned relative-position bias shared across all transformer blocks.

    Builds the same per-offset bias table and index as ``Attention`` does
    when given a ``window_size``; ``forward`` returns the bias tensor of
    shape (num_heads, tokens, tokens) including the cls token.
    """

    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        # (2*Wh-1)*(2*Ww-1) in-window offsets + 3 cls-related entries.
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token 2 cls & cls to cls
        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(window_size[0])
        coords_w = torch.arange(window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        # Index 0 in each axis is reserved for the cls token.
        relative_position_index = \
            torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        relative_position_index[0, 0:] = self.num_relative_distance - 3
        relative_position_index[0:, 0] = self.num_relative_distance - 2
        relative_position_index[0, 0] = self.num_relative_distance - 1
        self.register_buffer("relative_position_index", relative_position_index)
        # trunc_normal_(self.relative_position_bias_table, std=.02)

    def forward(self):
        # Look up per-pair biases and reshape to (nH, tokens, tokens).
        relative_position_bias = \
            self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
                self.window_size[0] * self.window_size[1] + 1,
                self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
        return relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
class VisionTransformer(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage

    BEiT-style ViT: patch embedding + cls token, optional absolute position
    embedding, a stack of ``Block`` layers with optional (shared or
    per-block) relative position bias, and either mean-pooled or
    cls-token-based classification.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None,
                 use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
                 use_mean_pooling=True, init_scale=0.001):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        if use_abs_pos_emb:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        else:
            self.pos_embed = None
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Either one bias module shared by all blocks, or per-block biases
        # inside each Block (use_rel_pos_bias), or none.
        if use_shared_rel_pos_bias:
            self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
        else:
            self.rel_pos_bias = None
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.use_rel_pos_bias = use_rel_pos_bias
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
            for i in range(depth)])
        # With mean pooling, normalization happens after pooling (fc_norm);
        # otherwise the token sequence itself is normalized (norm).
        self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
        self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        # trunc_normal_(self.mask_token, std=.02)
        # NOTE(review): assumes num_classes > 0; with num_classes <= 0 the
        # head is nn.Identity and has no .weight/.bias — confirm callers.
        trunc_normal_(self.head.weight, std=.02)
        self.apply(self._init_weights)
        self.fix_init_weight()
        # Scale the classification head down at init.
        self.head.weight.data.mul_(init_scale)
        self.head.bias.data.mul_(init_scale)

    def fix_init_weight(self):
        """Rescale each block's output projections by its depth (layer id)."""
        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))
        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def _init_weights(self, m):
        # Standard ViT init: truncated-normal linear weights, zero biases,
        # unit LayerNorm scale.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        """Return the number of transformer blocks."""
        return len(self.blocks)

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names excluded from weight decay by the optimizer."""
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head for a new number of classes."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        x = self.patch_embed(x)
        batch_size, seq_len, _ = x.size()
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for blk in self.blocks:
            x = blk(x, rel_pos_bias=rel_pos_bias)
        x = self.norm(x)
        if self.fc_norm is not None:
            # Mean-pool the patch tokens (excluding cls), then normalize.
            t = x[:, 1:, :]
            return self.fc_norm(t.mean(1))
        else:
            # Use the cls token as the image representation.
            return x[:, 0]

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
@register_model
def beit_small_patch16_224(pretrained=False, **kwargs):
    """BEiT-small (embed 384, depth 12) for 224x224 inputs, 16x16 patches.

    `pretrained` is accepted for timm API compatibility; no weights are
    loaded here.
    """
    vit = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    vit.default_cfg = _cfg()
    return vit
@register_model
def beit_base_patch16_224(pretrained=False, **kwargs):
    """BEiT-base (embed 768, depth 12) for 224x224 inputs, 16x16 patches.

    `pretrained` is accepted for timm API compatibility; no weights are
    loaded here.
    """
    vit = VisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    vit.default_cfg = _cfg()
    return vit
@register_model
def beit_base_patch16_384(pretrained=False, **kwargs):
    """BEiT-base (embed 768, depth 12) for 384x384 inputs, 16x16 patches.

    `pretrained` is accepted for timm API compatibility; no weights are
    loaded here.
    """
    vit = VisionTransformer(
        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12,
        mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    vit.default_cfg = _cfg()
    return vit
@register_model
def beit_large_patch16_224(pretrained=False, **kwargs):
    """BEiT-large (embed 1024, depth 24) for 224x224 inputs, 16x16 patches.

    `pretrained` is accepted for timm API compatibility; no weights are
    loaded here.
    """
    vit = VisionTransformer(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    vit.default_cfg = _cfg()
    return vit
@register_model
def beit_large_patch16_384(pretrained=False, **kwargs):
    """BEiT-large (embed 1024, depth 24) for 384x384 inputs, 16x16 patches.

    `pretrained` is accepted for timm API compatibility; no weights are
    loaded here.
    """
    vit = VisionTransformer(
        img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    vit.default_cfg = _cfg()
    return vit
@register_model
def beit_large_patch16_512(pretrained=False, **kwargs):
    """BEiT-large (embed 1024, depth 24) for 512x512 inputs, 16x16 patches.

    `pretrained` is accepted for timm API compatibility; no weights are
    loaded here.
    """
    vit = VisionTransformer(
        img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    vit.default_cfg = _cfg()
    return vit
|
"""
This module defines classes for describing properties of a model.
"""
import collections
import collections.abc
from copy import deepcopy
from datetime import date, datetime
from typing import (
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
Any,
Callable,
)
from decimal import Decimal
from iso8601 import parse_date # type: ignore
from itertools import chain
from . import abc
from .types import Types, MutableTypes
from .utilities import (
indent,
parameters_defaults,
properties_values,
qualified_name,
)
from .utilities.assertion import assert_is_instance
from .utilities.inspect import represent
from .utilities.types import UNDEFINED, Undefined, NoneType
from .abc import MarshallableTypes
from .version import Version
__all__: List[str] = [
"Property",
"Array",
"Boolean",
"Bytes",
"Date",
"Dictionary",
"Enumerated",
"Integer",
"Number",
"String",
"TYPES_PROPERTIES",
"has_mutable_types",
]
def _repr_keyword_argument_assignment(
    argument: str,
    value: MarshallableTypes,
    defaults: Optional[Dict[str, MarshallableTypes]] = None,
) -> Optional[str]:
    """
    Return one line of a keyword-argument assignment for use in a
    `__repr__`, or `None` when the argument should be omitted (it is
    unknown, `None`, or equal to its default value).
    """
    if defaults is not None:
        omit = (
            argument not in defaults
            or defaults[argument] == value
            or value is None
        )
        if omit:
            return None
    return " %s=%s," % (argument, indent(represent(value)))
def has_mutable_types(property: Union[abc.Property, type]) -> bool:
    """
    This function returns `True` if modification of the `.types` member of a
    property class or instance is permitted (i.e. no immutable `_types`
    declaration exists at the class level).
    Parameters:
    - property (sob.properties.Property|type)
    """
    if isinstance(property, abc.Property):
        property_type: type = type(property)
    else:
        assert issubclass(property, abc.Property)
        property_type = property
    return getattr(property_type, "_types") is None
class Property(abc.Property):
    """
    This is the base class for defining a property.
    Properties
    - types ([type|Property]): One or more expected `type` or
      `Property` instances. A list of more than one types and/or properties
      results in a polymorphic interpretation wherein a value is
      un-marshalled in accordance with each type or property in the list
      (sequentially), until the value is un-marshalled without throwing a
      `TypeError` or `ValueError`. If the list of types and/or properties
      is exhausted without successfully un-marshalling the value, a
      `TypeError` or `ValueError` error is raised.
    - required (bool): If `True` — marshalling a value for this property
      will throw an error if the value is `None`. Please note that `None`
      indicates a value was *not provided*. To indicate an *explicit* null
      value, use `sob.properties.types.NULL`.
    - versions ([str]|{str:Property}):
      The parameter should be one of the following:
      - A `set`, `tuple`, or `list` of version numbers to which this
        property applies.
      - A mapping of version numbers to an instance of
        [Property](#Property) instances applicable to that version.
      Version numbers prefixed by "<" indicating any version less than the
      one specified, so "<3.0" indicates that this property is available in
      versions prior to 3.0. The inverse is true for version numbers
      prefixed by ">". ">=" and "<=" have similar meanings, but are
      inclusive.
      Versioning can be applied to a property by calling
      `sob.meta.set_version` in the `__init__` method of an
      `sob.model.Object` sub-class.
    - name (str): The name of the property when loaded from or dumped into
      a JSON object. Specifying a `name` facilitates mapping of PEP8
      compliant property names to JSON or YAML attribute names which might
      be incompatible with well-formatted python code due to various
      reasons such as being camelCased, or being python keywords. To
      infer an appropriate property name programmatically, use the utility
      function `sob.utilities.string.property_name`.
    """

    # Class-level (immutable) type declaration; subclasses that fix their
    # accepted types override this, making `.types` read-only.
    _types: Optional[abc.Types] = None

    # noinspection PyShadowingNames
    def __init__(
        self,
        types: Union[
            abc.Types,
            Sequence[Union[type, "Property"]],
            type,
            "Property",
            Undefined,
            None,
        ] = UNDEFINED,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        # Start from any class-level types declaration.
        self._types: Optional[abc.Types] = getattr(type(self), "_types")
        # Only apply `types` when explicitly provided, so the setter's
        # immutability check is not triggered by the UNDEFINED default.
        if types is not UNDEFINED:
            setattr(self, "types", types)
        self.name: Optional[str] = name
        self.required: bool = required
        self._versions: Optional[Sequence[abc.Version]] = None
        if versions is not None:
            setattr(self, "versions", versions)

    @property  # type: ignore
    def types(self) -> Optional[abc.Types]:
        return self._types

    @types.setter
    def types(
        self,
        types_or_properties: Union[
            abc.Types,
            Sequence[Union[type, abc.Property]],
            type,
            abc.Property,
            None,
        ],
    ) -> None:
        # If types are set at the class-level, don't touch them
        if type(self)._types is not None:
            raise TypeError(
                f"`{qualified_name(type(self))}.types` is immutable"
            )
        # Normalize any non-`Types` input into a MutableTypes collection.
        if (types_or_properties is not None) and not isinstance(
            types_or_properties, abc.Types
        ):
            types_or_properties = MutableTypes(types_or_properties)
        assert (types_or_properties is None) or isinstance(
            types_or_properties, abc.Types
        )
        self._types = types_or_properties

    @property  # type: ignore
    def versions(self) -> Optional[Sequence[abc.Version]]:
        return self._versions

    @versions.setter
    def versions(
        self,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        # Normalize strings / single versions / iterables into a tuple of
        # `Version` instances.
        versions_tuple: Optional[Tuple[abc.Version, ...]] = None
        if versions is not None:
            assert_is_instance(
                "versions",
                versions,
                (str, abc.Version, collections.abc.Iterable),
            )
            version: Union[str, abc.Version]
            if isinstance(versions, str):
                version = Version(versions)
                versions_tuple = (version,)
            elif isinstance(versions, abc.Version):
                versions_tuple = (versions,)
            else:
                versions_list: List[abc.Version] = []
                for version in versions:
                    if not isinstance(version, abc.Version):
                        version = Version(version)
                    versions_list.append(version)
                versions_tuple = tuple(versions_list)
        self._versions = versions_tuple

    def __repr__(self) -> str:
        """Render a constructor-style representation, omitting arguments
        that equal their defaults."""
        lines = [qualified_name(type(self)) + "("]
        defaults: Dict[str, Any] = parameters_defaults(
            getattr(self, "__init__")
        )
        for property_name, value in properties_values(self):
            argument_representation = _repr_keyword_argument_assignment(
                property_name, value, defaults
            )
            if argument_representation is not None:
                lines.append(argument_representation)
        # Drop the trailing comma from the last argument line.
        lines[-1] = lines[-1].rstrip(",")
        lines.append(")")
        if len(lines) > 2:
            return "\n".join(lines)
        else:
            return "".join(lines)

    def __copy__(self) -> abc.Property:
        # Shallow copy: transfer all public, non-callable attributes.
        new_instance = self.__class__()
        attribute_name: str
        for attribute_name in dir(self):
            if attribute_name[0] != "_" and attribute_name != "data":
                value = getattr(self, attribute_name)
                if not callable(value):
                    setattr(new_instance, attribute_name, value)
        return new_instance

    def __deepcopy__(self, memo: dict) -> abc.Property:
        new_instance: abc.Property = self.__class__()
        attribute: str
        value: Any
        # Skip `types` when it is declared (immutable) at the class level.
        types_is_mutable: bool = bool(type(self)._types is None)
        for attribute, value in properties_values(self):
            if attribute != "types" or types_is_mutable:
                setattr(new_instance, attribute, deepcopy(value, memo=memo))
        return new_instance
class String(Property, abc.String):
    """
    A property whose values must be instances of `str`.

    See `sob.properties.Property`
    """

    _types: abc.Types = Types((str,))  # type: ignore

    def __init__(
        self,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        super().__init__(name=name, required=required, versions=versions)
def _date2str(value: date) -> str:
return value.isoformat()
class Date(Property, abc.Date):
    """
    A property whose values are python `date` objects.
    See `sob.properties.Property`.
    + Parameters:
    - date2str (collections.Callable): A function, taking one argument (a
      python `date` json_object), and returning a date string in the
      desired format. The default is `date.isoformat`--returning an
      iso8601 compliant date string.
    - str2date (collections.Callable): A function, taking one argument (a
      date string), and returning a python `date` object. By default,
      this is `iso8601.parse_date`.
    """

    _types: Optional[abc.Types] = Types((date,))

    def __init__(
        self,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
        date2str: Callable[[date], str] = _date2str,
        str2date: Callable[[str], date] = parse_date,
    ) -> None:
        super().__init__(
            name=name,
            required=required,
            versions=versions,
        )
        # Store the user-supplied (de)serialization hooks.
        self._date2str = date2str
        self._str2date = str2date

    def date2str(self, value: date) -> str:
        """Serialize a `date` using the configured formatter."""
        return self._date2str(value)

    def str2date(self, value: str) -> date:
        """Parse a date string using the configured parser."""
        return self._str2date(value)
def _datetime2str(value: datetime) -> str:
return value.isoformat()
class DateTime(Property, abc.DateTime):
    """
    A property whose values are python `datetime` objects.
    (See [`sob.properties.Property`](#Property))
    + Parameters:
    - datetime2str (collections.Callable): A function, taking one argument
      (a python `datetime` json_object), and returning a date-time string
      in the desired format. The default is `datetime.isoformat`--returning
      an iso8601 compliant date-time string.
    - str2datetime (collections.Callable): A function, taking one argument
      (a datetime string), and returning a python `datetime` json_object.
      By default, this is `iso8601.parse_date`.
    """

    _types: abc.Types = Types((datetime,))  # type: ignore

    def __init__(
        self,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
        datetime2str: Callable[[datetime], str] = _datetime2str,
        str2datetime: Callable[[str], datetime] = parse_date,
    ) -> None:
        # Store the user-supplied (de)serialization hooks.
        self._datetime2str = datetime2str
        self._str2datetime = str2datetime
        super().__init__(
            name=name,
            required=required,
            versions=versions,
        )

    def datetime2str(self, value: datetime) -> str:
        """Serialize a `datetime` using the configured formatter."""
        return self._datetime2str(value)

    def str2datetime(self, value: str) -> datetime:
        """Parse a date-time string using the configured parser."""
        return self._str2datetime(value)
class Bytes(Property, abc.Bytes):
    """
    A property with binary (`bytes`) values.

    (See [`sob.properties.Property`](#Property))
    """

    _types: abc.Types = Types((bytes,))  # type: ignore

    def __init__(
        self,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        super().__init__(name=name, required=required, versions=versions)
class Enumerated(Property, abc.Enumerated):
    """
    Parameters:
    This class accepts the following keyword parameters in *addition* to all
    parameters applicable to the base class [Property](#Property).
    - values ([typing.Any]): A list or set of possible values.
    Properties:
    This class exposes public properties matching its keyword parameters.
    """

    # noinspection PyShadowingNames
    def __init__(
        self,
        types: Union[
            abc.Types,
            Sequence[Union[type, "Property"]],
            type,
            "Property",
            Undefined,
            None,
        ] = UNDEFINED,
        values: Optional[Iterable[MarshallableTypes]] = None,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        # Initialize the backing attribute first so the `values` setter can
        # run after the base class is configured.
        self._values: Optional[Set[MarshallableTypes]] = None
        super().__init__(
            types=types, name=name, required=required, versions=versions
        )
        self.values = values  # type: ignore

    @property  # type: ignore
    def values(self) -> Optional[Set[MarshallableTypes]]:
        return self._values

    @values.setter
    def values(self, values: Optional[Iterable[MarshallableTypes]]) -> None:
        # `None` clears the constraint; anything else must be iterable and is
        # normalized into a set of permitted values.
        if values is None:
            self._values = None
            return
        assert_is_instance("values", values, collections.abc.Iterable)
        self._values = set(values)
class Number(Property, abc.Number):
    """
    See `sob.properties.Property`
    """

    _types: abc.Types = Types((Decimal, float, int))  # type: ignore

    def __init__(
        self,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        # Numeric properties carry no state of their own.
        super().__init__(
            name=name,
            required=required,
            versions=versions,
        )
class Integer(Property, abc.Integer):
    """
    See `sob.properties.Property`
    """

    _types: abc.Types = Types((int,))  # type: ignore

    def __init__(
        self,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        # Integer properties carry no state of their own.
        super().__init__(name=name, required=required, versions=versions)
class Boolean(Property, abc.Boolean):
    """
    See `sob.properties.Property`
    """

    _types: abc.Types = Types((bool,))  # type: ignore

    def __init__(
        self,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        # Boolean properties carry no state of their own.
        super().__init__(name=name, required=required, versions=versions)
class Array(Property, abc.ArrayProperty):
    """
    See `sob.properties.Property`...
    + Properties:
    - item_types (type|Property|[type|Property]): The type(s) of values/objects
      contained in the array. Similar to
      `sob.properties.Property().types`, but applied to items in the
      array, not the array itself.
    """

    _types: abc.Types = Types((abc.Array,))  # type: ignore
    _item_types: Optional[abc.Types] = None

    def __init__(
        self,
        item_types: Union[
            type, Sequence[Union[type, Property]], Undefined, abc.Types, None
        ] = UNDEFINED,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        # Start from the class-level default, then apply an explicit override
        # (the `UNDEFINED` sentinel distinguishes "not given" from `None`).
        self._item_types: Optional[Types] = getattr(type(self), "_item_types")
        if item_types is not UNDEFINED:
            self.item_types = item_types  # type: ignore
        super().__init__(name=name, required=required, versions=versions)

    @property  # type: ignore
    def item_types(self) -> Optional[abc.Types]:  # type: ignore
        return self._item_types

    @item_types.setter  # type: ignore
    def item_types(
        self,
        item_types: Union[
            type,
            abc.Property,
            Sequence[Union[type, abc.Property]],
            abc.Types,
            None,
        ],
    ) -> None:
        # Normalize all accepted shapes (None, a Types collection, a single
        # type/Property, or a sequence of those) into MutableTypes or None.
        normalized: Optional[abc.Types]
        if item_types is None:
            normalized = None
        elif isinstance(item_types, abc.Types):
            normalized = (
                item_types
                if isinstance(item_types, abc.MutableTypes)
                else MutableTypes(item_types)
            )
        else:
            if isinstance(item_types, (type, abc.Property)):
                collected: List[Union[type, abc.Property]] = [item_types]
            else:
                assert isinstance(item_types, Sequence)
                collected = []
                for candidate in item_types:
                    assert isinstance(candidate, (type, abc.Property))
                    collected.append(candidate)
            normalized = MutableTypes(collected)
        assert isinstance(normalized, (abc.Types, NoneType))
        self._item_types = normalized
class Dictionary(Property, abc.DictionaryProperty):
    """
    See `sob.properties.Property`...
    + Properties:
    - value_types (type|Property|[type|Property]): The type(s) of
      values/objects comprising the mapped values. Similar to
      `sob.properties.Property.types`, but applies to *values* in the
      dictionary object, not the dictionary itself.
    """

    _types: abc.Types = Types((abc.Dictionary,))  # type: ignore
    _value_types: Optional[abc.Types] = None

    def __init__(
        self,
        value_types: Optional[
            Union[type, Sequence[Union[type, Property]], Undefined]
        ] = UNDEFINED,
        name: Optional[str] = None,
        required: bool = False,
        versions: Optional[
            Union[str, abc.Version, Iterable[Union[str, abc.Version]]]
        ] = None,
    ) -> None:
        # Start from the class-level default, then apply an explicit override
        # (the `UNDEFINED` sentinel distinguishes "not given" from `None`).
        self._value_types: Optional[abc.Types] = getattr(
            type(self), "_value_types"
        )
        if value_types is not UNDEFINED:
            self.value_types = value_types  # type: ignore
        super().__init__(name=name, required=required, versions=versions)

    @property  # type: ignore
    def value_types(self) -> Optional[abc.Types]:
        return self._value_types

    @value_types.setter
    def value_types(
        self,
        value_types: Union[
            Sequence[Union[type, abc.Property]], abc.Types, None
        ],
    ) -> None:
        """
        A sequence of types and/or `sob.properties.Property` instances.
        If more than one type or property definition is provided,
        un-marshalling is attempted using each `value_type`, in sequential
        order. If a value could be cast into more than one of the `types`
        without throwing a `ValueError`, `TypeError`, or
        `sob.errors.ValidationError`, the value type occurring *first* in the
        sequence will be used.
        """
        if value_types is None:
            # `None` clears the constraint entirely.
            self._value_types = None
            return
        if not isinstance(value_types, abc.MutableTypes):
            value_types = MutableTypes(value_types)
        assert isinstance(value_types, (abc.Types, NoneType))
        self._value_types = value_types
# This constant maps data types to their corresponding properties
# It is built by scanning the module namespace (via `locals()`, evaluated here
# in module scope) for every concrete `Property` subclass with a non-empty
# `_types` tuple, and emitting one `(python type, property class)` pair for
# each type the property declares.  Because this is a dict comprehension, a
# type claimed by more than one property class maps to whichever class was
# defined *last* in the module.
TYPES_PROPERTIES: Dict[type, type] = {
    type_: property_class
    for type_, property_class in chain(
        *map(
            # For one property class, yield a (type, class) pair per declared type.
            lambda property_class: (
                (type_, property_class)
                for type_ in getattr(property_class, "_types")
            ),
            (
                property_class
                for property_class in locals().values()
                if (
                    isinstance(property_class, type)
                    and issubclass(property_class, Property)
                    and getattr(property_class, "_types")
                )
            ),
        )
    )
}
|
import FWCore.ParameterSet.Config as cms
from L1Trigger.L1TMuonEndCap.fakeEmtfParams_cff import *
# ESProducer configuration for L1TMuonEndCapForestOnlineProd, reading from the
# online trigger Oracle database (see the `onlineDB` connect string below).
L1TMuonEndCapForestOnlineProd = cms.ESProducer("L1TMuonEndCapForestOnlineProd",
    onlineAuthentication = cms.string('.'),  # directory holding the DB authentication file
    forceGeneration = cms.bool(False),
    onlineDB = cms.string('oracle://CMS_OMDS_LB/CMS_TRG_R'),
    transactionSafe = cms.bool(True) # any value has no effect on this particular producer
)
|
#!/usr/bin/env python
#
# This script generates a BPF program with structure inspired by trace.py. The
# generated program operates on PID-indexed stacks. Generally speaking,
# bookkeeping is done at every intermediate function kprobe/kretprobe to enforce
# the goal of "fail iff this call chain and these predicates".
#
# Top level functions (the ones at the end of the call chain) are responsible for
# creating the pid_struct and deleting it from the map in kprobe and kretprobe
# respectively.
#
# Intermediate functions (between should_fail_whatever and the top level
# functions) are responsible for updating the stack to indicate "I have been
# called and one of my predicate(s) passed" in their entry probes. In their exit
# probes, they do the opposite, popping their stack to maintain correctness.
# This implementation aims to ensure correctness in edge cases like recursive
# calls, so there's some additional information stored in pid_struct for that.
#
# At the bottom level function (should_fail_whatever), we do a simple check to
# ensure all necessary calls/predicates have passed before error injection.
#
# Note: presently there are a few hacks to get around various rewriter/verifier
# issues.
#
# Note: this tool requires:
# - CONFIG_BPF_KPROBE_OVERRIDE
#
# USAGE: inject [-h] [-I header] [-P probability] [-v] mode spec
#
# Copyright (c) 2018 Facebook, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 16-Mar-2018 Howard McLauchlan Created this.
import argparse
import re
from bcc import BPF
class Probe:
    """One kprobe/kretprobe pair for a function in the failure call chain.

    Each instance renders the BPF C source for its function: top-level
    functions init/delete the per-PID map entry, intermediate functions push
    and pop the bookkeeping stack, and the bottom-level function performs the
    actual error injection via bpf_override_return().
    """

    # Failure mode -> errno expression substituted into bpf_override_return().
    errno_mapping = {
        "kmalloc": "-ENOMEM",
        "bio": "-EIO",
    }

    @classmethod
    def configure(cls, mode, probability):
        # Class-wide settings shared by every generated probe.
        cls.mode = mode
        cls.probability = probability

    def __init__(self, func, preds, length, entry):
        # length of call chain
        self.length = length
        self.func = func          # raw "name(args)" spec for this function
        self.preds = preds        # list of (predicate, position) tuples
        self.is_entry = entry     # True -> kprobe, False -> kretprobe

    def _bail(self, err):
        raise ValueError("error in probe '%s': %s" %
                (self.spec, err))

    def _get_err(self):
        # errno expression for the configured failure mode
        return Probe.errno_mapping[Probe.mode]

    def _get_if_top(self):
        # Emit map init (entry) / cleanup (exit) code, but only for the
        # top-level function of the chain (position 0).
        # ordering guarantees that if this function is top, the last tup is top
        chk = self.preds[0][1] == 0
        if not chk:
            return ""
        if Probe.probability == 1:
            early_pred = "false"
        else:
            # Threshold comparison against a random u32 implements the
            # requested failure probability.
            early_pred = "bpf_get_prandom_u32() > %s" % str(int((1<<32)*Probe.probability))
        # init the map
        # dont do an early exit here so the singular case works automatically
        # have an early exit for probability option
        enter = """
        /*
         * Early exit for probability case
         */
        if (%s)
                return 0;
        /*
         * Top level function init map
         */
        struct pid_struct p_struct = {0, 0};
        m.insert(&pid, &p_struct);
        """ % early_pred
        # kill the entry  (NOTE: `exit` here shadows the builtin, locally only)
        exit = """
        /*
         * Top level function clean up map
         */
        m.delete(&pid);
        """
        return enter if self.is_entry else exit

    def _get_heading(self):
        # we need to insert identifier and ctx into self.func
        # gonna make a lot of formatting assumptions to make this work
        left = self.func.find("(")
        right = self.func.rfind(")")
        # self.event and self.func_name need to be accessible
        self.event = self.func[0:left]
        self.func_name = self.event + ("_entry" if self.is_entry else "_exit")
        func_sig = "struct pt_regs *ctx"
        # assume theres something in there, no guarantee its well formed
        if right > left + 1 and self.is_entry:
            func_sig += ", " + self.func[left + 1:right]
        return "int %s(%s)" % (self.func_name, func_sig)

    def _get_entry_logic(self):
        # Push this call onto the stack when its predicate holds and we are at
        # the expected chain position.
        # there is at least one tup(pred, place) for this function
        text = """
        if (p->conds_met >= %s)
                return 0;
        if (p->conds_met == %s && %s) {
                p->stack[%s] = p->curr_call;
                p->conds_met++;
        }"""
        text = text % (self.length, self.preds[0][1], self.preds[0][0],
                self.preds[0][1])
        # for each additional pred
        for tup in self.preds[1:]:
            text += """
        else if (p->conds_met == %s && %s) {
                p->stack[%s] = p->curr_call;
                p->conds_met++;
        }
        """ % (tup[1], tup[0], tup[1])
        return text

    def _generate_entry(self):
        # Full kprobe body: optional top-level init, predicate prep, stack push.
        prog = self._get_heading() + """
{
        u32 pid = bpf_get_current_pid_tgid();
        %s
        struct pid_struct *p = m.lookup(&pid);
        if (!p)
                return 0;
        /*
         * preparation for predicate, if necessary
         */
        %s
        /*
         * Generate entry logic
         */
        %s
        p->curr_call++;
        return 0;
}"""
        prog = prog % (self._get_if_top(), self.prep, self._get_entry_logic())
        return prog

    # only need to check top of stack
    def _get_exit_logic(self):
        text = """
        if (p->conds_met < 1 || p->conds_met >= %s)
                return 0;
        if (p->stack[p->conds_met - 1] == p->curr_call)
                p->conds_met--;
        """
        return text % str(self.length + 1)

    def _generate_exit(self):
        # Full kretprobe body: pop the stack, then top-level cleanup if needed.
        prog = self._get_heading() + """
{
        u32 pid = bpf_get_current_pid_tgid();
        struct pid_struct *p = m.lookup(&pid);
        if (!p)
                return 0;
        p->curr_call--;
        /*
         * Generate exit logic
         */
        %s
        %s
        return 0;
}"""
        prog = prog % (self._get_exit_logic(), self._get_if_top())
        return prog

    # Special case for should_fail_whatever
    def _generate_bottom(self):
        # Bottom-level probe: inject the error when either the chain has
        # length 1, or all upstream conditions have been met.
        pred = self.preds[0][0]
        text = self._get_heading() + """
{
        /*
         * preparation for predicate, if necessary
         */
        %s
        /*
         * If this is the only call in the chain and predicate passes
         */
        if (%s == 1 && %s) {
                bpf_override_return(ctx, %s);
                return 0;
        }
        u32 pid = bpf_get_current_pid_tgid();
        struct pid_struct *p = m.lookup(&pid);
        if (!p)
                return 0;
        /*
         * If all conds have been met and predicate passes
         */
        if (p->conds_met == %s && %s)
                bpf_override_return(ctx, %s);
        return 0;
}"""
        return text % (self.prep, self.length, pred, self._get_err(),
                self.length - 1, pred, self._get_err())

    # presently parses and replaces STRCMP
    # STRCMP exists because string comparison is inconvenient and somewhat buggy
    # https://github.com/iovisor/bcc/issues/1617
    def _prepare_pred(self):
        # Rewrites each STRCMP(ptr, literal) occurrence into generated C that
        # compares the string one character at a time into a unique bool.
        self.prep = ""
        for i in range(len(self.preds)):
            new_pred = ""
            pred = self.preds[i][0]
            place = self.preds[i][1]
            start, ind = 0, 0
            while start < len(pred):
                ind = pred.find("STRCMP(", start)
                if ind == -1:
                    break
                new_pred += pred[start:ind]
                # 7 is len("STRCMP(")
                start = pred.find(")", start + 7) + 1
                # then ind ... start is STRCMP(...)
                ptr, literal = pred[ind + 7:start - 1].split(",")
                literal = literal.strip()
                # x->y->z, some string literal
                # we make unique id with place_ind
                uuid = "%s_%s" % (place, ind)
                unique_bool = "is_true_%s" % uuid
                self.prep += """
        char *str_%s = %s;
        bool %s = true;\n""" % (uuid, ptr.strip(), unique_bool)
                check = "\t%s &= *(str_%s++) == '%%s';\n" % (unique_bool, uuid)
                for ch in literal:
                    self.prep += check % ch
                # Also compare the terminating NUL so prefixes don't match.
                self.prep += check % r'\0'
                new_pred += unique_bool
            new_pred += pred[start:]
            self.preds[i] = (new_pred, place)

    def generate_program(self):
        """Return the BPF C source for this probe."""
        # generate code to work around various rewriter issues
        self._prepare_pred()
        # special case for bottom
        if self.preds[-1][1] == self.length - 1:
            return self._generate_bottom()
        return self._generate_entry() if self.is_entry else self._generate_exit()

    def attach(self, bpf):
        """Attach the generated function as a kprobe or kretprobe."""
        if self.is_entry:
            bpf.attach_kprobe(event=self.event,
                    fn_name=self.func_name)
        else:
            bpf.attach_kretprobe(event=self.event,
                    fn_name=self.func_name)
class Tool:
    """Command-line driver.

    Parses the spec into per-function probes, generates the complete BPF
    program, attaches all kprobes/kretprobes and polls until interrupted.
    """

    examples = """
EXAMPLES:
# ./inject.py kmalloc -v 'SyS_mount()'
    Fails all calls to syscall mount
# ./inject.py kmalloc -v '(true) => SyS_mount()(true)'
    Explicit rewriting of above
# ./inject.py kmalloc -v 'mount_subtree() => btrfs_mount()'
    Fails btrfs mounts only
# ./inject.py kmalloc -v 'd_alloc_parallel(struct dentry *parent, const struct \\
    qstr *name)(STRCMP(name->name, 'bananas'))'
    Fails dentry allocations of files named 'bananas'
# ./inject.py kmalloc -v -P 0.01 'SyS_mount()'
    Fails calls to syscall mount with 1% probability
"""

    # add cases as necessary
    error_injection_mapping = {
        "kmalloc": "should_failslab(struct kmem_cache *s, gfp_t gfpflags)",
        "bio": "should_fail_bio(struct bio *bio)",
    }

    def __init__(self):
        parser = argparse.ArgumentParser(description="Fail specified kernel" +
                " functionality when call chain and predicates are met",
                formatter_class=argparse.RawDescriptionHelpFormatter,
                epilog=Tool.examples)
        parser.add_argument(dest="mode", choices=['kmalloc', 'bio'],
                help="indicate which base kernel function to fail")
        parser.add_argument(metavar="spec", dest="spec",
                help="specify call chain")
        parser.add_argument("-I", "--include", action="append",
                metavar="header",
                help="additional header files to include in the BPF program")
        parser.add_argument("-P", "--probability", default=1,
                metavar="probability", type=float,
                help="probability that this call chain will fail")
        parser.add_argument("-v", "--verbose", action="store_true",
                help="print BPF program")
        self.args = parser.parse_args()
        self.program = ""
        self.spec = self.args.spec
        self.map = {}       # func spec -> list of (predicate, position)
        self.probes = []
        # The bottom-level (injected) function for the chosen mode.
        self.key = Tool.error_injection_mapping[self.args.mode]

    # create_probes and associated stuff
    def _create_probes(self):
        """Parse the spec and build an entry (and, except for the bottom
        function, an exit) Probe per function in the chain."""
        self._parse_spec()
        Probe.configure(self.args.mode, self.args.probability)
        # self, func, preds, total, entry
        # create all the pair probes
        for fx, preds in self.map.items():
            # do the enter
            self.probes.append(Probe(fx, preds, self.length, True))
            if self.key == fx:
                # bottom-level function only needs the entry probe
                continue
            # do the exit
            self.probes.append(Probe(fx, preds, self.length, False))

    def _parse_frames(self):
        """Split the spec into (function, predicate) frames, validating
        parenthesis balance along the way."""
        # sentinel simplifies detection of the final frame
        data = self.spec + '\0'
        start, count = 0, 0
        frames = []
        cur_frame = []
        i = 0
        last_frame_added = 0
        while i < len(data):
            # improper input
            if count < 0:
                raise Exception("Check your parentheses")
            c = data[i]
            count += c == '('
            count -= c == ')'
            if not count:
                if c == '\0' or (c == '=' and data[i + 1] == '>'):
                    # This block is closing a chunk. This means cur_frame must
                    # have something in it.
                    if not cur_frame:
                        raise Exception("Cannot parse spec, missing parens")
                    if len(cur_frame) == 2:
                        frame = tuple(cur_frame)
                    elif cur_frame[0][0] == '(':
                        # predicate only: implicitly applies to the bottom fn
                        frame = self.key, cur_frame[0]
                    else:
                        # function only: predicate defaults to true
                        frame = cur_frame[0], '(true)'
                    frames.append(frame)
                    del cur_frame[:]
                    i += 1  # skip the '>' of '=>'
                    start = i + 1
                elif c == ')':
                    cur_frame.append(data[start:i + 1].strip())
                    start = i + 1
                    last_frame_added = start
            i += 1
        # We only permit spaces after the last frame
        if self.spec[last_frame_added:].strip():
            raise Exception("Invalid characters found after last frame")
        # improper input
        if count:
            raise Exception("Check your parentheses")
        return frames

    def _parse_spec(self):
        """Validate each frame and index predicates by chain position
        (bottom-level function is position `length - 1`)."""
        frames = self._parse_frames()
        frames.reverse()
        absolute_order = 0
        for f in frames:
            # default case
            func, pred = f[0], f[1]
            if not self._validate_predicate(pred):
                raise Exception("Invalid predicate")
            if not self._validate_identifier(func):
                raise Exception("Invalid function identifier")
            tup = (pred, absolute_order)
            if func not in self.map:
                self.map[func] = [tup]
            else:
                self.map[func].append(tup)
            absolute_order += 1
        if self.key not in self.map:
            # spec omitted the bottom-level function; add it implicitly
            self.map[self.key] = [('(true)', absolute_order)]
            absolute_order += 1
        self.length = absolute_order

    def _validate_identifier(self, func):
        # We've already established paren balancing. We will only look for
        # identifier validity here.
        paren_index = func.find("(")
        potential_id = func[:paren_index]
        # FIX: the previous pattern used "[_a-zA-z]"; the "A-z" range also
        # matches the ASCII characters between 'Z' and 'a' ([ \ ] ^ _ `),
        # which would accept invalid C identifiers.
        pattern = '[_a-zA-Z][_a-zA-Z0-9]*$'
        if re.match(pattern, potential_id):
            return True
        return False

    def _validate_predicate(self, pred):
        """Return False for a "("-prefixed predicate with unbalanced parens;
        anything else is accepted here (balance was checked during framing)."""
        if len(pred) > 0 and pred[0] == "(":
            depth = 1
            for i in range(1, len(pred)):
                if pred[i] == "(":
                    depth += 1
                elif pred[i] == ")":
                    depth -= 1
            if depth != 0:
                # not well formed
                return False
        return True

    def _def_pid_struct(self):
        # Per-PID bookkeeping; stack size equals the chain length.
        text = """
struct pid_struct {
        u64 curr_call; /* book keeping to handle recursion */
        u64 conds_met; /* stack pointer */
        u64 stack[%s];
};
""" % self.length
        return text

    def _attach_probes(self):
        self.bpf = BPF(text=self.program)
        for p in self.probes:
            p.attach(self.bpf)

    def _generate_program(self):
        """Assemble includes, the pid_struct, the hash map and every probe."""
        # leave out auto includes for now
        self.program += '#include <linux/mm.h>\n'
        for include in (self.args.include or []):
            self.program += "#include <%s>\n" % include
        self.program += self._def_pid_struct()
        self.program += "BPF_HASH(m, u32, struct pid_struct);\n"
        for p in self.probes:
            self.program += p.generate_program() + "\n"
        if self.args.verbose:
            print(self.program)

    def _main_loop(self):
        while True:
            self.bpf.perf_buffer_poll()

    def run(self):
        """Build, attach and poll; blocks until interrupted."""
        self._create_probes()
        self._generate_program()
        self._attach_probes()
        self._main_loop()
# Script entry point: parse CLI args, generate/attach the BPF program, poll.
if __name__ == "__main__":
    Tool().run()
|
"""
API operations on User objects.
"""
import copy
import json
import logging
import re
from collections import OrderedDict
from markupsafe import escape
from sqlalchemy import (
false,
or_,
true
)
from galaxy import (
exceptions,
util,
web
)
from galaxy.exceptions import ObjectInvalid
from galaxy.managers import users
from galaxy.security.validate_user_input import (
validate_email,
validate_password,
validate_publicname
)
from galaxy.tools.toolbox.filters import FilterFactory
from galaxy.util import (
docstring_trim,
listify
)
from galaxy.web import (
expose_api,
expose_api_anonymous
)
from galaxy.web.form_builder import AddressField
from galaxy.webapps.base.controller import (
BaseAPIController,
BaseUIController,
CreatesApiKeysMixin,
UsesFormDefinitionsMixin,
UsesTagsMixin
)
log = logging.getLogger(__name__)
class UserAPIController(BaseAPIController, UsesTagsMixin, CreatesApiKeysMixin, BaseUIController, UsesFormDefinitionsMixin):
    def __init__(self, app):
        """Wire up the user manager and (de)serializers used by all endpoints."""
        super().__init__(app)
        self.user_manager = users.UserManager(app)
        self.user_serializer = users.UserSerializer(app)
        self.user_deserializer = users.UserDeserializer(app)
    @expose_api
    def index(self, trans, deleted='False', f_email=None, f_name=None, f_any=None, **kwd):
        """
        GET /api/users
        GET /api/users/deleted
        Displays a collection (list) of users.

        :param deleted: (optional) If true, show deleted users
        :type deleted: bool
        :param f_email: (optional) An email address to filter on. (Non-admin
            users can only use this if ``expose_user_email`` is ``True`` in
            galaxy.ini)
        :type f_email: str
        :param f_name: (optional) A username to filter on. (Non-admin users
            can only use this if ``expose_user_name`` is ``True`` in
            galaxy.ini)
        :type f_name: str
        :param f_any: (optional) Filter on username OR email. (Non-admin users
            can use this, the email filter and username filter will
            only be active if their corresponding ``expose_user_*`` is
            ``True`` in galaxy.ini)
        :type f_any: str
        """
        rval = []
        query = trans.sa_session.query(trans.app.model.User)
        deleted = util.string_as_bool(deleted)
        # Substring filters; non-admins may only use them when the matching
        # expose_user_* config flag is enabled.
        if f_email and (trans.user_is_admin or trans.app.config.expose_user_email):
            query = query.filter(trans.app.model.User.email.like("%%%s%%" % f_email))
        if f_name and (trans.user_is_admin or trans.app.config.expose_user_name):
            query = query.filter(trans.app.model.User.username.like("%%%s%%" % f_name))
        if f_any:
            # Match either field for admins; for regular users only the
            # exposed field(s) participate in the OR.
            if trans.user_is_admin:
                query = query.filter(or_(
                    trans.app.model.User.email.like("%%%s%%" % f_any),
                    trans.app.model.User.username.like("%%%s%%" % f_any)
                ))
            else:
                if trans.app.config.expose_user_email and trans.app.config.expose_user_name:
                    query = query.filter(or_(
                        trans.app.model.User.email.like("%%%s%%" % f_any),
                        trans.app.model.User.username.like("%%%s%%" % f_any)
                    ))
                elif trans.app.config.expose_user_email:
                    query = query.filter(trans.app.model.User.email.like("%%%s%%" % f_any))
                elif trans.app.config.expose_user_name:
                    query = query.filter(trans.app.model.User.username.like("%%%s%%" % f_any))
        if deleted:
            # only admins can see deleted users
            if not trans.user_is_admin:
                return []
            query = query.filter(trans.app.model.User.table.c.deleted == true())
        else:
            # special case: user can see only their own user
            # special case2: if the galaxy admin has specified that other user email/names are
            #                exposed, we don't want special case #1
            if not trans.user_is_admin and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email:
                item = trans.user.to_dict(value_mapper={'id': trans.security.encode_id})
                return [item]
            query = query.filter(trans.app.model.User.table.c.deleted == false())
        for user in query:
            item = user.to_dict(value_mapper={'id': trans.security.encode_id})
            # If NOT configured to expose_email, do not expose email UNLESS the user is self, or
            # the user is an admin
            if user is not trans.user and not trans.user_is_admin:
                # Strip the dict down to only the keys the config exposes.
                expose_keys = ["id"]
                if trans.app.config.expose_user_name:
                    expose_keys.append("username")
                if trans.app.config.expose_user_email:
                    expose_keys.append("email")
                new_item = {}
                for key, value in item.items():
                    if key in expose_keys:
                        new_item[key] = value
                item = new_item
            # TODO: move into api_values
            rval.append(item)
        return rval
    @expose_api_anonymous
    def show(self, trans, id, deleted='False', **kwd):
        """
        GET /api/users/{encoded_id}
        GET /api/users/deleted/{encoded_id}
        GET /api/users/current
        Displays information about a user.
        """
        deleted = util.string_as_bool(deleted)
        try:
            # user is requesting data about themselves
            if id == "current":
                # ...and is anonymous - return usage and quota (if any)
                if not trans.user:
                    item = self.anon_user_api_value(trans)
                    return item
                # ...and is logged in - return full
                else:
                    user = trans.user
            else:
                user = self.get_user(trans, id, deleted=deleted)
                # check that the user is requesting themselves (and they aren't del'd) unless admin
                # NOTE: assertion failures here are deliberately funneled into
                # the broad `except Exception` below and surfaced as an
                # invalid-id error rather than leaking permission details.
                if not trans.user_is_admin:
                    assert trans.user == user
                    assert not user.deleted
        except exceptions.ItemDeletionException:
            raise
        except Exception:
            raise exceptions.RequestParameterInvalidException('Invalid user id specified', id=id)
        return self.user_serializer.serialize_to_view(user, view='detailed')
    @expose_api
    def create(self, trans, payload, **kwd):
        """
        POST /api/users
        Creates a new Galaxy user.

        Only admins may create users (or, with remote-user auth enabled,
        trigger creation of a remote user); otherwise NotImplemented is raised.
        """
        if not trans.app.config.allow_user_creation and not trans.user_is_admin:
            raise exceptions.ConfigDoesNotAllowException('User creation is not allowed in this Galaxy instance')
        if trans.app.config.use_remote_user and trans.user_is_admin:
            user = trans.get_or_create_remote_user(remote_user_email=payload['remote_user_email'])
        elif trans.user_is_admin:
            # NOTE(review): missing 'username'/'email'/'password' keys raise a
            # bare KeyError here — confirm upstream callers always supply them.
            username = payload['username']
            email = payload['email']
            password = payload['password']
            # Collect all validation failures into a single message.
            message = "\n".join((validate_email(trans, email),
                                 validate_password(trans, password, password),
                                 validate_publicname(trans, username))).rstrip()
            if message:
                raise exceptions.RequestParameterInvalidException(message)
            else:
                user = self.user_manager.create(email=email, username=username, password=password)
        else:
            raise exceptions.NotImplemented()
        item = user.to_dict(view='element', value_mapper={'id': trans.security.encode_id,
                                                          'total_disk_usage': float})
        return item
@expose_api
def update(self, trans, id, payload, **kwd):
"""
update( self, trans, id, payload, **kwd )
* PUT /api/users/{id}
updates the values for the item with the given ``id``
:type id: str
:param id: the encoded id of the item to update
:type payload: dict
:param payload: a dictionary of new attribute values
:rtype: dict
:returns: an error object if an error occurred or a dictionary containing
the serialized item after any changes
"""
current_user = trans.user
user_to_update = self.user_manager.by_id(self.decode_id(id))
# only allow updating other users if they're admin
editing_someone_else = current_user != user_to_update
is_admin = trans.api_inherit_admin or self.user_manager.is_admin(current_user)
if editing_someone_else and not is_admin:
raise exceptions.InsufficientPermissionsException('You are not allowed to update that user', id=id)
self.user_deserializer.deserialize(user_to_update, payload, user=current_user, trans=trans)
return self.user_serializer.serialize_to_view(user_to_update, view='detailed')
@expose_api
def delete(self, trans, id, **kwd):
"""
DELETE /api/users/{id}
delete the user with the given ``id``
Functionality restricted based on admin status
:param id: the encoded id of the user to delete
:type id: str
:param purge: (optional) if True, purge the user
:type purge: bool
"""
user_to_update = self.user_manager.by_id(self.decode_id(id))
if trans.user_is_admin:
purge = util.string_as_bool(kwd.get('purge', False))
if purge:
log.debug("Purging user %s", user_to_update)
self.user_manager.purge(user_to_update)
else:
self.user_manager.delete(user_to_update)
else:
if trans.user == user_to_update:
self.user_manager.delete(user_to_update)
else:
raise exceptions.InsufficientPermissionsException('You may only delete your own account.', id=id)
return self.user_serializer.serialize_to_view(user_to_update, view='detailed')
@web.require_admin
@expose_api
def undelete(self, trans, id, **kwd):
"""
POST /api/users/deleted/{id}/undelete
Undelete the user with the given ``id``
:param id: the encoded id of the user to be undeleted
:type id: str
"""
user = self.get_user(trans, id)
self.user_manager.undelete(user)
return self.user_serializer.serialize_to_view(user, view='detailed')
# TODO: move to more basal, common resource than this
def anon_user_api_value(self, trans):
"""Return data for an anonymous user, truncated to only usage and quota_percent"""
usage = trans.app.quota_agent.get_usage(trans)
percent = trans.app.quota_agent.get_percent(trans=trans, usage=usage)
return {'total_disk_usage': int(usage),
'nice_total_disk_usage': util.nice_size(usage),
'quota_percent': percent}
def _get_extra_user_preferences(self, trans):
"""
Reads the file user_preferences_extra_conf.yml to display
admin defined user informations
"""
return trans.app.config.user_preferences_extra['preferences']
def _build_extra_user_pref_inputs(self, preferences, user):
"""
Build extra user preferences inputs list.
Add values to the fields if present
"""
if not preferences:
return []
extra_pref_inputs = list()
# Build sections for different categories of inputs
for item, value in preferences.items():
if value is not None:
input_fields = copy.deepcopy(value["inputs"])
for input in input_fields:
help = input.get('help', '')
required = 'Required' if util.string_as_bool(input.get('required')) else ''
if help:
input['help'] = "{} {}".format(help, required)
else:
input['help'] = required
field = item + '|' + input['name']
for data_item in user.extra_preferences:
if field in data_item:
input['value'] = user.extra_preferences[data_item]
extra_pref_inputs.append({'type': 'section', 'title': value['description'], 'name': item, 'expanded': True, 'inputs': input_fields})
return extra_pref_inputs
    @expose_api
    def get_information(self, trans, id, **kwd):
        """
        GET /api/users/{id}/information/inputs
        Return user details such as username, email, addresses etc.

        :param id: the encoded id of the user
        :type id: str
        """
        user = self._get_user(trans, id)
        email = user.email
        username = user.username
        inputs = list()
        inputs.append({
            'id': 'email_input',
            'name': 'email',
            'type': 'text',
            'label': 'Email address',
            'value': email,
            'help': 'If you change your email address you will receive an activation link in the new mailbox and you have to activate your account by visiting it.'})
        if trans.webapp.name == 'galaxy':
            # Galaxy webapp: public name, custom user-info forms, addresses
            # and admin-defined extra preferences.
            inputs.append({
                'id': 'name_input',
                'name': 'username',
                'type': 'text',
                'label': 'Public name',
                'value': username,
                'help': 'Your public name is an identifier that will be used to generate addresses for information you share publicly. Public names must be at least three characters in length and contain only lower-case letters, numbers, and the "-" character.'})
            info_form_models = self.get_all_forms(trans, filter=dict(deleted=False), form_type=trans.app.model.FormDefinition.types.USER_INFO)
            if info_form_models:
                # Build a conditional select over the available user-info
                # forms, pre-selecting the one the user has filled in.
                info_form_id = trans.security.encode_id(user.values.form_definition.id) if user.values else None
                info_field = {
                    'type': 'conditional',
                    'name': 'info',
                    'cases': [],
                    'test_param': {
                        'name': 'form_id',
                        'label': 'User type',
                        'type': 'select',
                        'value': info_form_id,
                        'help': '',
                        'data': []
                    }
                }
                for f in info_form_models:
                    values = None
                    if info_form_id == trans.security.encode_id(f.id) and user.values:
                        values = user.values.content
                    info_form = f.to_dict(user=user, values=values, security=trans.security)
                    info_field['test_param']['data'].append({'label': info_form['name'], 'value': info_form['id']})
                    info_field['cases'].append({'value': info_form['id'], 'inputs': info_form['inputs']})
                inputs.append(info_field)
            # Repeat section for the user's saved addresses, with one cached
            # copy of the input list per existing address.
            address_inputs = [{'type': 'hidden', 'name': 'id', 'hidden': True}]
            for field in AddressField.fields():
                address_inputs.append({'type': 'text', 'name': field[0], 'label': field[1], 'help': field[2]})
            address_repeat = {'title': 'Address', 'name': 'address', 'type': 'repeat', 'inputs': address_inputs, 'cache': []}
            address_values = [address.to_dict(trans) for address in user.addresses]
            for address in address_values:
                address_cache = []
                for input in address_inputs:
                    input_copy = input.copy()
                    input_copy['value'] = address.get(input['name'])
                    address_cache.append(input_copy)
                address_repeat['cache'].append(address_cache)
            inputs.append(address_repeat)
            # Build input sections for extra user preferences
            extra_user_pref = self._build_extra_user_pref_inputs(self._get_extra_user_preferences(trans), user)
            for item in extra_user_pref:
                inputs.append(item)
        else:
            # Tool shed webapp: the public name is locked once the user owns
            # a repository.
            if user.active_repositories:
                inputs.append(dict(id='name_input', name='username', label='Public name:', type='hidden', value=username, help='You cannot change your public name after you have created a repository in this tool shed.'))
            else:
                inputs.append(dict(id='name_input', name='username', label='Public name:', type='text', value=username, help='Your public name provides a means of identifying you publicly within this tool shed. Public names must be at least three characters in length and contain only lower-case letters, numbers, and the "-" character. You cannot change your public name after you have created a repository in this tool shed.'))
        return {
            'email': email,
            'username': username,
            'addresses': [address.to_dict(trans) for address in user.addresses],
            'inputs': inputs,
        }
@expose_api
def set_information(self, trans, id, payload={}, **kwd):
    """
    PUT /api/users/{id}/information/inputs
    Save a user's email, username, addresses etc.

    :param id: the encoded id of the user
    :type id: str
    :param payload: data with new settings
    :type payload: dict

    :raises RequestParameterInvalidException: if the email or public name fails validation
    :raises ObjectAttributeMissingException: if a required extra-preference or address field is empty
    :raises ObjectNotFound: if an address id in the payload cannot be decoded/loaded
    """
    user = self._get_user(trans, id)
    # Update email
    if 'email' in payload:
        email = payload.get('email')
        # Both format validation and site-level validation must pass.
        message = self._validate_email(email) or validate_email(trans, email, user)
        if message:
            raise exceptions.RequestParameterInvalidException(message)
        if user.email != email:
            # Update user email and user's private role name which must match
            private_role = trans.app.security_agent.get_private_user_role(user)
            private_role.name = email
            private_role.description = 'Private role for ' + email
            user.email = email
            trans.sa_session.add(user)
            trans.sa_session.add(private_role)
            trans.sa_session.flush()
            if trans.app.config.user_activation_on:
                # Deactivate the user if email was changed and activation is on.
                user.active = False
                if self.user_manager.send_activation_email(trans, user.email, user.username):
                    message = 'The login information has been updated with the changes.<br>Verification email has been sent to your new email address. Please verify it by clicking the activation link in the email.<br>Please check your spam/trash folder in case you cannot find the message.'
                else:
                    message = 'Unable to send activation email, please contact your local Galaxy administrator.'
                    if trans.app.config.error_email_to is not None:
                        message += ' Contact: %s' % trans.app.config.error_email_to
                    raise exceptions.InternalServerError(message)
    # Update public name
    if 'username' in payload:
        username = payload.get('username')
        message = self._validate_publicname(username) or validate_publicname(trans, username, user)
        if message:
            raise exceptions.RequestParameterInvalidException(message)
        if user.username != username:
            user.username = username
    # Update user custom form: payload keys prefixed with 'info|' become form values.
    user_info_form_id = payload.get('info|form_id')
    if user_info_form_id:
        prefix = 'info|'
        user_info_form = trans.sa_session.query(trans.app.model.FormDefinition).get(trans.security.decode_id(user_info_form_id))
        user_info_values = {}
        for item in payload:
            if item.startswith(prefix):
                user_info_values[item[len(prefix):]] = payload[item]
        form_values = trans.model.FormValues(user_info_form, user_info_values)
        trans.sa_session.add(form_values)
        user.values = form_values
    # Update values for extra user preference items
    extra_user_pref_data = dict()
    extra_pref_keys = self._get_extra_user_preferences(trans)
    if extra_pref_keys is not None:
        for key in extra_pref_keys:
            key_prefix = key + '|'
            for item in payload:
                if item.startswith(key_prefix):
                    # Show error message if the required field is empty
                    if payload[item] == "":
                        # Raise an exception when a required field is empty while saving the form
                        keys = item.split("|")
                        section = extra_pref_keys[keys[0]]
                        for input in section['inputs']:
                            if input['name'] == keys[1] and input['required']:
                                raise exceptions.ObjectAttributeMissingException("Please fill the required field")
                    extra_user_pref_data[item] = payload[item]
        user.preferences["extra_user_preferences"] = json.dumps(extra_user_pref_data)
    # Update user addresses: payload keys look like 'address_<index>|<attribute>'.
    address_dicts = {}
    address_count = 0
    for item in payload:
        match = re.match(r'^address_(?P<index>\d+)\|(?P<attribute>\S+)', item)
        if match:
            groups = match.groupdict()
            index = int(groups['index'])
            attribute = groups['attribute']
            address_dicts[index] = address_dicts.get(index) or {}
            address_dicts[index][attribute] = payload[item]
            address_count = max(address_count, index + 1)
    user.addresses = []
    # NOTE(review): assumes address indices in the payload are contiguous from 0;
    # a gap would raise KeyError on address_dicts[index] — confirm client behavior.
    for index in range(0, address_count):
        d = address_dicts[index]
        if d.get('id'):
            # Existing address: look it up by its encoded id.
            try:
                user_address = trans.sa_session.query(trans.app.model.UserAddress).get(trans.security.decode_id(d['id']))
            except Exception as e:
                raise exceptions.ObjectNotFound('Failed to access user address ({}). {}'.format(d['id'], e))
        else:
            user_address = trans.model.UserAddress()
            trans.log_event('User address added')
        for field in AddressField.fields():
            if str(field[2]).lower() == 'required' and not d.get(field[0]):
                raise exceptions.ObjectAttributeMissingException('Address {}: {} ({}) required.'.format(index + 1, field[1], field[0]))
            setattr(user_address, field[0], str(d.get(field[0], '')))
        user_address.user = user
        user.addresses.append(user_address)
        trans.sa_session.add(user_address)
    trans.sa_session.add(user)
    trans.sa_session.flush()
    trans.log_event('User information added')
    return {'message': 'User information has been saved.'}
@expose_api
def set_favorite(self, trans, id, object_type, payload={}, **kwd):
    """Add an object to the user's favorites.

    PUT /api/users/{id}/favorites/{object_type}

    :param id: the encoded id of the user
    :type id: str
    :param object_type: the object type that users wants to favorite
    :type object_type: str
    :param object_id: the id of an object that users wants to favorite
    :type object_id: str
    """
    self._validate_favorite_object_type(object_type)
    user = self._get_user(trans, id)
    prefs = user.preferences
    favorites = json.loads(prefs['favorites']) if 'favorites' in prefs else {}
    if object_type == 'tools':
        tool_id = payload.get('object_id')
        tool = self.app.toolbox.get_tool(tool_id)
        if not tool:
            raise exceptions.ObjectNotFound("Could not find tool with id '%s'." % tool_id)
        if not tool.allow_user_access(user):
            raise exceptions.AuthenticationFailed("Access denied for tool with id '%s'." % tool_id)
        favorite_tools = favorites.get('tools', [])
        # Persist only when the tool is newly added.
        if tool_id not in favorite_tools:
            favorite_tools.append(tool_id)
            favorites['tools'] = favorite_tools
            prefs['favorites'] = json.dumps(favorites)
            trans.sa_session.flush()
    return favorites
@expose_api
def remove_favorite(self, trans, id, object_type, object_id, payload={}, **kwd):
    """Remove an object from the user's favorites.

    DELETE /api/users/{id}/favorites/{object_type}/{object_id:.*?}

    :param id: the encoded id of the user
    :type id: str
    :param object_type: the object type that users wants to favorite
    :type object_type: str
    :param object_id: the id of an object that users wants to remove from favorites
    :type object_id: str
    """
    self._validate_favorite_object_type(object_type)
    user = self._get_user(trans, id)
    prefs = user.preferences
    favorites = json.loads(prefs['favorites']) if 'favorites' in prefs else {}
    if object_type == 'tools' and 'tools' in favorites:
        favorite_tools = favorites['tools']
        if object_id not in favorite_tools:
            raise exceptions.ObjectNotFound('Given object is not in the list of favorites')
        favorite_tools.remove(object_id)
        favorites['tools'] = favorite_tools
        prefs['favorites'] = json.dumps(favorites)
        trans.sa_session.flush()
    return favorites
def _validate_favorite_object_type(self, object_type):
if object_type in ['tools']:
pass
else:
raise exceptions.ObjectAttributeInvalidException("This type is not supported. Given object_type: %s" % object_type)
def _validate_email(self, email):
''' Validate email and username using regex '''
if email == '' or not isinstance(email, str):
return 'Please provide your email address.'
if not re.match(r'^(([^<>()[\]\.,;:\s@"]+(\.[^<>()[\]\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$', email):
return 'Please provide your valid email address.'
if len(email) > 255:
return 'Email cannot be more than 255 characters in length.'
def _validate_publicname(self, username):
if not re.match(r'^[a-z0-9\-]{3,255}$', username):
return 'Public name must contain only lowercase letters, numbers and "-". It also has to be shorter than 255 characters but longer than 2.'
@expose_api
def get_password(self, trans, id, payload={}, **kwd):
    """
    Return available password inputs.
    """
    fields = (('current', 'Current password'),
              ('password', 'New password'),
              ('confirm', 'Confirm password'))
    return {'inputs': [{'name': name, 'type': 'password', 'label': label}
                       for name, label in fields]}
@expose_api
def set_password(self, trans, id, payload={}, **kwd):
    """
    Allow the logged-in user to change their own password.
    """
    # The manager returns (user, message); user is None on failure.
    user, message = self.user_manager.change_password(trans, id=id, **payload)
    if user is None:
        raise exceptions.AuthenticationRequired(message)
    return {"message": "Password has been changed."}
@expose_api
def get_permissions(self, trans, id, payload={}, **kwd):
    """
    Get the user's default permissions for the new histories.
    """
    user = self._get_user(trans, id)
    roles = user.all_roles()
    inputs = []
    for index, action in trans.app.model.Dataset.permitted_actions.items():
        # Ids of roles currently granted for this action.
        current_role_ids = [a.role.id for a in user.default_permissions if a.action == action.action]
        inputs.append({
            'type': 'select',
            'multiple': True,
            'optional': True,
            'name': index,
            'label': action.action,
            'help': action.description,
            'options': list({(r.name, r.id) for r in roles}),
            'value': current_role_ids,
        })
    return {'inputs': inputs}
@expose_api
def set_permissions(self, trans, id, payload={}, **kwd):
    """
    Set the user's default permissions for the new histories.
    """
    user = self._get_user(trans, id)
    permissions = {}
    for index, action in trans.app.model.Dataset.permitted_actions.items():
        action_id = trans.app.security_agent.get_action(action.action).action
        role_ids = payload.get(index) or []
        permissions[action_id] = [trans.sa_session.query(trans.app.model.Role).get(rid) for rid in role_ids]
    trans.app.security_agent.user_set_default_permissions(user, permissions)
    return {'message': 'Permissions have been saved.'}
@expose_api
def get_toolbox_filters(self, trans, id, payload={}, **kwd):
    """
    API call for fetching toolbox filters data. Toolbox filters are specified in galaxy.ini.
    The user can activate them and the choice is stored in user_preferences.
    """
    user = self._get_user(trans, id)
    filter_types = self._get_filter_types(trans)
    # Previously saved filter selections, keyed by filter type.
    saved_values = {
        name: listify(value, do_strip=True)
        for name, value in user.preferences.items()
        if name in filter_types
    }
    inputs = [{
        'type': 'hidden',
        'name': 'helptext',
        'label': 'In this section you may enable or disable Toolbox filters. Please contact your admin to configure filters as necessary.'
    }]
    errors = {}
    factory = FilterFactory(trans.app.toolbox)
    for filter_type in filter_types:
        self._add_filter_inputs(factory, filter_types, inputs, errors, filter_type, saved_values)
    return {'inputs': inputs, 'errors': errors}
@expose_api
def set_toolbox_filters(self, trans, id, payload={}, **kwd):
    """
    API call to update toolbox filters data.
    """
    user = self._get_user(trans, id)
    filter_types = self._get_filter_types(trans)
    for filter_type in filter_types:
        prefix = filter_type + '|'
        # Keep the suffix of every payload key of this type flagged 'true'.
        enabled = [
            name[len(prefix):]
            for name in payload
            if payload.get(name) == 'true' and name.startswith(filter_type)
        ]
        user.preferences[filter_type] = ','.join(enabled)
    trans.sa_session.add(user)
    trans.sa_session.flush()
    return {'message': 'Toolbox filters have been saved.'}
def _add_filter_inputs(self, factory, filter_types, inputs, errors, filter_type, saved_values):
    """Append a form section for one toolbox filter type to *inputs*.

    :param factory: FilterFactory used to resolve filter names to functions
    :param filter_types: mapping of filter type -> {'title': ..., 'config': [...]}
    :param inputs: list of form inputs, extended in place
    :param errors: per-filter error messages, filled in place
    :param filter_type: the filter type (a key of *filter_types*) to process
    :param saved_values: mapping of filter type -> list of enabled filter names
    """
    filter_inputs = list()
    filter_values = saved_values.get(filter_type, [])
    filter_config = filter_types[filter_type]['config']
    filter_title = filter_types[filter_type]['title']
    for filter_name in filter_config:
        function = factory.build_filter_function(filter_name)
        if function is None:
            # BUG FIX: previously execution fell through and dereferenced
            # function.__doc__ (i.e. NoneType's docstring), appending a bogus
            # boolean input for a filter that could not be resolved.
            # Record the error and skip this filter instead.
            errors['{}|{}'.format(filter_type, filter_name)] = 'Filter function not found.'
            continue
        short_description, description = None, None
        doc_string = docstring_trim(function.__doc__)
        split = doc_string.split('\n\n')
        if split:
            short_description = split[0]
            if len(split) > 1:
                description = split[1]
        else:
            # NOTE: str.split() always returns a non-empty list, so this branch
            # is effectively unreachable; kept to preserve existing behavior.
            log.warning('No description specified in the __doc__ string for %s.' % filter_name)
        filter_inputs.append({
            'type': 'boolean',
            'name': filter_name,
            'label': short_description or filter_name,
            'help': description or 'No description available.',
            'value': 'true' if filter_name in filter_values else 'false'
        })
    if filter_inputs:
        inputs.append({'type': 'section', 'title': filter_title, 'name': filter_type, 'expanded': True, 'inputs': filter_inputs})
def _get_filter_types(self, trans):
return OrderedDict([('toolbox_tool_filters', {'title': 'Tools', 'config': trans.app.config.user_tool_filters}),
('toolbox_section_filters', {'title': 'Sections', 'config': trans.app.config.user_tool_section_filters}),
('toolbox_label_filters', {'title': 'Labels', 'config': trans.app.config.user_tool_label_filters})])
@expose_api
def api_key(self, trans, id, payload={}, **kwd):
    """
    Create a new API key for the given user.
    """
    return self.create_api_key(trans, self._get_user(trans, id))
@expose_api
def get_api_key(self, trans, id, payload={}, **kwd):
    """
    Return the API key inputs for the given user.
    """
    return self._build_inputs_api_key(self._get_user(trans, id))
@expose_api
def set_api_key(self, trans, id, payload={}, **kwd):
    """
    Generate a fresh API key and return the updated API key inputs.
    """
    user = self._get_user(trans, id)
    self.create_api_key(trans, user)
    return self._build_inputs_api_key(user, message='Generated a new web API key.')
def _build_inputs_api_key(self, user, message=''):
"""
Build API key inputs.
"""
inputs = [{'name': 'api-key',
'type': 'text',
'label': 'Current API key:',
'value': user.api_keys[0].key if user.api_keys else 'Not available.',
'readonly': True,
'help': ' An API key will allow you to access via web API. Please note that this key acts as an alternate means to access your account and should be treated with the same care as your login password.'}]
return {'message': message, 'inputs': inputs}
@expose_api
def get_communication(self, trans, id, payload={}, **kwd):
    """
    Build communication server inputs.
    """
    user = self._get_user(trans, id)
    # Stored as the strings 'true'/'false'; default is disabled.
    enabled = user.preferences.get('communication_server', 'false')
    return {'inputs': [{'name': 'enable',
                        'type': 'boolean',
                        'label': 'Enable communication',
                        'value': enabled}]}
@expose_api
def set_communication(self, trans, id, payload={}, **kwd):
    """
    Allows the user to activate/deactivate the communication server.
    """
    user = self._get_user(trans, id)
    enable = payload.get('enable', 'false')
    user.preferences['communication_server'] = enable
    trans.sa_session.add(user)
    trans.sa_session.flush()
    if enable == 'true':
        return {'message': 'Your communication server has been activated.'}
    return {'message': 'Your communication server has been disabled.'}
@expose_api
def get_custom_builds(self, trans, id, payload={}, **kwd):
    """
    GET /api/users/{id}/custom_builds

    Returns collection of custom builds.

    :param id: the encoded id of the user
    :type id: str
    """
    user = self._get_user(trans, id)
    # Custom builds are stored as a JSON-encoded dict in the user's preferences.
    dbkeys = json.loads(user.preferences['dbkeys']) if 'dbkeys' in user.preferences else {}
    valid_dbkeys = {}
    update = False
    for key, dbkey in dbkeys.items():
        if 'count' not in dbkey and 'linecount' in dbkey:
            # Build lacks a chromosome count: fill it in from the first line of
            # the 'linecount' dataset once that dataset is ready. Builds whose
            # dataset is missing/deleted/not-OK are omitted from the result.
            chrom_count_dataset = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(dbkey['linecount'])
            if chrom_count_dataset and not chrom_count_dataset.deleted and chrom_count_dataset.state == trans.app.model.HistoryDatasetAssociation.states.OK:
                chrom_count = int(open(chrom_count_dataset.file_name).readline())
                dbkey['count'] = chrom_count
                valid_dbkeys[key] = dbkey
                update = True
        else:
            valid_dbkeys[key] = dbkey
    if update:
        # Persist the counts computed above.
        user.preferences['dbkeys'] = json.dumps(valid_dbkeys)
    # Flatten the mapping into a list, exposing the key as 'id'.
    dbkey_collection = []
    for key, attributes in valid_dbkeys.items():
        attributes['id'] = key
        dbkey_collection.append(attributes)
    return dbkey_collection
@expose_api
def add_custom_builds(self, trans, id, key, payload={}, **kwd):
    """
    PUT /api/users/{id}/custom_builds/{key}

    Add new custom build.

    :param id: the encoded id of the user
    :type id: str
    :param key: custom build key
    :type key: str
    :param payload: data with new build details
    :type payload: dict
    """
    user = self._get_user(trans, id)
    dbkeys = json.loads(user.preferences['dbkeys']) if 'dbkeys' in user.preferences else {}
    name = payload.get('name')
    len_type = payload.get('len|type')
    len_value = payload.get('len|value')
    if len_type not in ['file', 'fasta', 'text'] or not len_value:
        raise exceptions.RequestParameterInvalidException('Please specify a valid data source type.')
    if not name or not key:
        raise exceptions.RequestParameterMissingException('You must specify values for all the fields.')
    elif key in dbkeys:
        raise exceptions.DuplicatedIdentifierException('There is already a custom build with that key. Delete it first if you want to replace it.')
    else:
        # Have everything needed; create new build.
        build_dict = {'name': name}
        if len_type in ['text', 'file']:
            # Create new len file
            new_len = trans.app.model.HistoryDatasetAssociation(extension='len', create_dataset=True, sa_session=trans.sa_session)
            trans.sa_session.add(new_len)
            new_len.name = name
            new_len.visible = False
            new_len.state = trans.app.model.Job.states.OK
            new_len.info = 'custom build .len file'
            try:
                trans.app.object_store.create(new_len.dataset)
            except ObjectInvalid:
                raise exceptions.InternalServerError('Unable to create output dataset: object store is full.')
            trans.sa_session.flush()
            counter = 0
            lines_skipped = 0
            with open(new_len.file_name, 'w') as f:
                # LEN files have format:
                # <chrom_name><tab><chrom_length>
                for line in len_value.split('\n'):
                    # Splits at the last whitespace in the line
                    lst = line.strip().rsplit(None, 1)
                    if not lst or len(lst) < 2:
                        lines_skipped += 1
                        continue
                    chrom, length = lst[0], lst[1]
                    try:
                        length = int(length)
                    except ValueError:
                        lines_skipped += 1
                        continue
                    # Reject chromosome names that would change under HTML escaping.
                    if chrom != escape(chrom):
                        build_dict['message'] = 'Invalid chromosome(s) with HTML detected and skipped.'
                        lines_skipped += 1
                        continue
                    counter += 1
                    f.write('{}\t{}\n'.format(chrom, length))
            build_dict['len'] = new_len.id
            build_dict['count'] = counter
        else:
            # len_type == 'fasta': derive len/linecount datasets by conversion
            # from the referenced fasta dataset.
            build_dict['fasta'] = trans.security.decode_id(len_value)
            dataset = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(build_dict['fasta'])
            try:
                new_len = dataset.get_converted_dataset(trans, 'len')
                new_linecount = new_len.get_converted_dataset(trans, 'linecount')
                build_dict['len'] = new_len.id
                build_dict['linecount'] = new_linecount.id
            except Exception:
                raise exceptions.ToolExecutionError('Failed to convert dataset.')
        dbkeys[key] = build_dict
        user.preferences['dbkeys'] = json.dumps(dbkeys)
        trans.sa_session.flush()
        return build_dict
@expose_api
def delete_custom_builds(self, trans, id, key, payload={}, **kwd):
    """
    DELETE /api/users/{id}/custom_builds/{key}

    Delete a custom build.

    :param id: the encoded id of the user
    :type id: str
    :param key: custom build key to be deleted
    :type key: str
    """
    user = self._get_user(trans, id)
    dbkeys = json.loads(user.preferences['dbkeys']) if 'dbkeys' in user.preferences else {}
    # Guard clause: unknown or empty key is an error.
    if not key or key not in dbkeys:
        raise exceptions.ObjectNotFound('Could not find and delete build (%s).' % key)
    del dbkeys[key]
    user.preferences['dbkeys'] = json.dumps(dbkeys)
    trans.sa_session.flush()
    return {'message': 'Deleted %s.' % key}
def _get_user(self, trans, id):
    """Fetch the user for *id* and enforce access permissions."""
    user = self.get_user(trans, id)
    if not user:
        raise exceptions.RequestParameterInvalidException('Invalid user (%s).' % id)
    # Only the user themselves or an admin may operate on this account.
    if not (user == trans.user or trans.user_is_admin):
        raise exceptions.InsufficientPermissionsException('Access denied.')
    return user
|
import os
import time
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.utils import timezone
from member.models import UserNotification
from nadine import email
class Command(BaseCommand):
    help = "Send User Notification Emails."

    def handle(self, *args, **options):
        # Only notify when both parties are currently checked in.
        here_today = list(User.helper.here_today())
        unsent = UserNotification.objects.filter(sent_date__isnull=True)
        for notification in unsent:
            if notification.notify_user in here_today and notification.target_user in here_today:
                email.send_user_notifications(notification.notify_user, notification.target_user)
                notification.sent_date = timezone.localtime(timezone.now())
                notification.save()
# Copyright 2020 Office Nomads LLC (https://officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://opensource.org/licenses/Apache-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from databases import Database
from fastapi import FastAPI
from app.core.config import DATABASE_URL
logger = logging.getLogger(__name__)
async def connect_to_db(app: FastAPI) -> None:
    """Create the database connection pool and attach it to ``app.state._db``.

    Uses the ``<DATABASE_URL>_test`` database when the TESTING environment
    variable is set. Connection failures are logged rather than raised,
    preserving best-effort startup behavior.
    """
    DB_URL = f"{DATABASE_URL}_test" if os.environ.get("TESTING") else DATABASE_URL
    database = Database(DB_URL, min_size=2, max_size=10)
    try:
        await database.connect()
        app.state._db = database
    except Exception as e:
        # BUG FIX: Logger.warn() is a deprecated alias; use warning().
        logger.warning("--- DB CONNECTION ERROR ---")
        logger.warning(e)
        logger.warning("--- DB CONNECTION ERROR ---")
async def close_db_connection(app: FastAPI) -> None:
    """Disconnect the pool previously stored on ``app.state._db``.

    Failures are logged rather than raised so shutdown always completes.
    """
    try:
        await app.state._db.disconnect()
    except Exception as e:
        # BUG FIX: Logger.warn() is a deprecated alias; use warning().
        logger.warning("--- DB DISCONNECT ERROR ---")
        logger.warning(e)
        logger.warning("--- DB DISCONNECT ERROR ---")
|
# CommentPolicy.py - Pre Checkin Trigger
import os, sys, re, tkMessageBox
def checkComment(comment):
    """Return all case ids (bugNNN / featNNN) found in the comment, lowercased."""
    return re.findall(r'bug\d+|feat\d+', comment.lower())
def main():
    """Pre-checkin trigger: block the check-in when no case id is present."""
    comment = os.environ.get('CLEARCASE_COMMENT', '')
    version = os.environ['CLEARCASE_XPN']
    caseIds = checkComment(comment)
    if caseIds:
        sys.exit(0)
    # No case id found: tell the user and signal failure to ClearCase.
    tkMessageBox.showerror("Missing Case ID in check-in-comment",
                           "Version:\n%s\n\nKommentar:\n%s" %
                           (version, comment))
    sys.exit(1)
# Remove # below to see what environment variables ClearCase sets.
#
#text = "\n".join( [" = ".join(x) for x in os.environ.items()
# if x[0].startswith('CLEARCASE')])
#tkMessageBox.showinfo("Environment variable", text)
# Run the trigger when invoked as a script by ClearCase.
if __name__ == '__main__':
    main()
####################################################################
# CommentPolicy2.py - Post Checkin Trigger
import os, sys, re, tkMessageBox
def checkComment(comment):
    """Extract every case id (bugNNN / featNNN) from a check-in comment."""
    case_id_pattern = r'bug\d+|feat\d+'
    return re.findall(case_id_pattern, comment.lower())
def storeCheckInComment(caseIds, version, comment):
    """Report the check-in to the issue database; return 0 on success, 1 on failure."""
    # In real life, this function would use ODBC, COM etc, not a dialog!
    title = 'Store info in issue database'
    message = ('Hello, can you store in the issue database\n'
               'that we got the following message:\n%s\n'
               'when we checked in\n%s\n\n%s') % (" & ".join(caseIds),
                                                 version, comment)
    # A 'yes' answer stands in for a successful database write.
    return 0 if tkMessageBox.askyesno(title, message) else 1
def main():
    """Post-checkin trigger: report the check-in to the case handling system."""
    comment = os.environ.get('CLEARCASE_COMMENT', '')
    version = os.environ['CLEARCASE_XPN']
    caseIds = checkComment(comment)
    if caseIds:
        error = storeCheckInComment(caseIds, version, comment)
        if error:
            tkMessageBox.showerror("Error in issue database system!",
                                   "Unable to store message:\n"
                                   + comment)
    else:
        error = 1
        tkMessageBox.showerror("Missing Case ID in check-in comment!!!",
                               "Version:\n%s\n\nComment:\n%s" %
                               (version, comment))
    sys.exit(error)
# Run the trigger when invoked as a script by ClearCase.
if __name__ == '__main__':
    main()
####################################################################
# mktrig.py
import os
class Trigger:
    """Base class for creating a ClearCase trigger type via `cleartool mktrtype`.

    Subclasses provide the class attributes `type` (e.g. '-element') and
    `flags` (e.g. '-all -preop checkin'); a mixin provides `what`
    (the action the trigger executes).
    """
    def __init__(self, name, comment):
        self.name = name
        self.comment = comment
    def run(self):
        # Assemble the cleartool command from the subclass attributes and
        # execute it, capturing combined stdout/stderr into self.result.
        # (os.popen4 and print statements are Python 2 only.)
        cmd = ('cleartool mktrtype %(type)s %(flags)s -c '
               '"%(comment)s" %(what)s %(name)s')
        args = {'type': self.type, 'flags': self.flags,
                'comment' : self.comment,
                'what': self.what, 'name': self.name}
        print "Executing:"
        print cmd % args
        stdin, stdouterr = os.popen4(cmd % args)
        stdin.close()
        self.result = stdouterr.read()
        stdouterr.close()
class ElementTrigger(Trigger):
    # Trigger attached to elements (cleartool's -element trigger kind).
    type = '-element'
class TypeTrigger(Trigger):
    # Trigger attached to types (cleartool's -type trigger kind).
    type = '-type'
class PreCITrigger(ElementTrigger):
    # Fires on all elements before a checkin operation.
    flags = '-all -preop checkin'
class PostCITrigger(ElementTrigger):
    # Fires on all elements after a checkin operation.
    flags = '-all -postop checkin'
class PythonExecMixIn:
    """Mixin that sets the trigger action to execute a Python script."""
    def __init__(self, script):
        self.what = '-exec "python %s"' % script
class PythonPreCITrigger(PythonExecMixIn, PreCITrigger):
    """Pre-checkin trigger that executes a Python script."""
    def __init__(self, name, comment, script):
        Trigger.__init__(self, name, comment)
        PythonExecMixIn.__init__(self, script)
class PythonPostCITrigger(PythonExecMixIn, PostCITrigger):
    """Post-checkin trigger that executes a Python script."""
    def __init__(self, name, comment, script):
        Trigger.__init__(self, name, comment)
        PythonExecMixIn.__init__(self, script)
# Create both triggers (pre- and post-checkin) and print the output of
# each `cleartool mktrtype` invocation (Python 2 print statements).
trigger1 = PythonPreCITrigger('CommentPolicy',
                              'Verify case id in check-in comment',
                              '/path/to/CommentPolicy.py')
trigger2 = PythonPostCITrigger('CommentPolicy2',
                               'Report check-in to case handling system',
                               '/path/to/CommentPolicy2.py')
for trigger in [trigger1, trigger2]:
    trigger.run()
    print "Result:"
    print trigger.result
|
"""Utilities for assertion debugging."""
import collections.abc
import os
import pprint
from typing import AbstractSet
from typing import Any
from typing import Callable
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
import _pytest._code
from _pytest import outcomes
from _pytest._io.saferepr import _pformat_dispatch
from _pytest._io.saferepr import safeformat
from _pytest._io.saferepr import saferepr
from _pytest.config import Config
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare: Optional[Callable[[str, object, object], Optional[str]]] = None

# Works similarly to the _reprcompare attribute. Is populated with the hook
# call when pytest_runtest_setup is called.
_assertion_pass: Optional[Callable[[int, str, str], None]] = None

# Config object which is assigned during pytest_runtest_protocol.
_config: Optional[Config] = None
def format_explanation(explanation: str) -> str:
    r"""Format an assertion explanation for display.

    Normally all embedded newlines are escaped, however there are
    three exceptions: \n{, \n} and \n~. The first two are intended
    cover nested explanations, see function and attribute explanations
    for examples (.visit_Call(), visit_Attribute()). The last one is
    for when one explanation needs to span multiple lines, e.g. when
    displaying diffs.
    """
    return "\n".join(_format_lines(_split_explanation(explanation)))
def _split_explanation(explanation: str) -> List[str]:
r"""Return a list of individual lines in the explanation.
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters.
"""
raw_lines = (explanation or "").split("\n")
lines = [raw_lines[0]]
for values in raw_lines[1:]:
if values and values[0] in ["{", "}", "~", ">"]:
lines.append(values)
else:
lines[-1] += "\\n" + values
return lines
def _format_lines(lines: Sequence[str]) -> List[str]:
"""Format the individual lines.
This will replace the '{', '}' and '~' characters of our mini formatting
language with the proper 'where ...', 'and ...' and ' + ...' text, taking
care of indentation along the way.
Return a list of formatted lines.
"""
result = list(lines[:1])
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith("{"):
if stackcnt[-1]:
s = "and "
else:
s = "where "
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(" +" + " " * (len(stack) - 1) + s + line[1:])
elif line.startswith("}"):
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line[0] in ["~", ">"]
stack[-1] += 1
indent = len(stack) if line.startswith("~") else len(stack) - 1
result.append(" " * indent + line[1:])
assert len(stack) == 1
return result
def issequence(x: Any) -> bool:
    """True for sequence types other than strings."""
    if isinstance(x, str):
        return False
    return isinstance(x, collections.abc.Sequence)


def istext(x: Any) -> bool:
    """True for str instances."""
    return isinstance(x, str)


def isdict(x: Any) -> bool:
    """True for dict instances."""
    return isinstance(x, dict)


def isset(x: Any) -> bool:
    """True for set or frozenset instances."""
    return isinstance(x, (set, frozenset))


def isnamedtuple(obj: Any) -> bool:
    """True for namedtuple instances (tuples carrying a _fields attribute)."""
    if not isinstance(obj, tuple):
        return False
    return getattr(obj, "_fields", None) is not None


def isdatacls(obj: Any) -> bool:
    """True for dataclass instances."""
    return getattr(obj, "__dataclass_fields__", None) is not None


def isattrs(obj: Any) -> bool:
    """True for attrs-decorated class instances."""
    return getattr(obj, "__attrs_attrs__", None) is not None


def isiterable(obj: Any) -> bool:
    """True for non-string objects that support iteration."""
    try:
        iter(obj)
    except TypeError:
        return False
    return not istext(obj)
def has_default_eq(
    obj: object,
) -> bool:
    """Return True when *obj* appears to use an auto-generated ``__eq__``.

    When ``__eq__`` carries compiled code, its filename identifies generated
    implementations: dataclasses compile to ``"<string>"`` while attrs embeds
    ``"attrs generated eq"``. Objects whose ``__eq__`` has no ``__code__``
    (e.g. C slot wrappers) are treated as default.
    """
    # inspired from https://github.com/willmcgugan/rich/blob/07d51ffc1aee6f16bd2e5a25b4e82850fb9ed778/rich/pretty.py#L68
    eq_code = getattr(obj.__eq__, "__code__", None)
    code_filename = getattr(eq_code, "co_filename", None)
    if code_filename is None:
        return True
    if isattrs(obj):
        return "attrs generated eq" in code_filename
    return code_filename == "<string>"  # data class
def assertrepr_compare(config, op: str, left: Any, right: Any) -> Optional[List[str]]:
    """Return specialised explanations for some operators/operands."""
    verbose = config.getoption("verbose")
    if verbose > 1:
        # High verbosity: show full, unabridged representations.
        left_repr = safeformat(left)
        right_repr = safeformat(right)
    else:
        # XXX: "15 chars indentation" is wrong
        # ("E AssertionError: assert "); should use term width.
        maxsize = (
            80 - 15 - len(op) - 2
        ) // 2  # 15 chars indentation, 1 space around op
        left_repr = saferepr(left, maxsize=maxsize)
        right_repr = saferepr(right, maxsize=maxsize)
    summary = f"{left_repr} {op} {right_repr}"
    explanation = None
    try:
        if op == "==":
            explanation = _compare_eq_any(left, right, verbose)
        elif op == "not in":
            if istext(left) and istext(right):
                explanation = _notin_text(left, right, verbose)
    except outcomes.Exit:
        # Never swallow a request to exit the test session.
        raise
    except Exception:
        # A faulty __repr__ on either operand must not crash reporting.
        explanation = [
            "(pytest_assertion plugin: representation of details failed: {}.".format(
                _pytest._code.ExceptionInfo.from_current()._getreprcrash()
            ),
            " Probably an object has a faulty __repr__.)",
        ]
    if not explanation:
        return None
    return [summary] + explanation
def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]:
    """Dispatch an '==' comparison to the most specific explanation helper."""
    explanation = []
    if istext(left) and istext(right):
        explanation = _diff_text(left, right, verbose)
    else:
        # Local import to keep module import time low.
        from _pytest.python_api import ApproxBase

        if isinstance(left, ApproxBase) or isinstance(right, ApproxBase):
            # Although the common order should be obtained == expected, this ensures both ways
            approx_side = left if isinstance(left, ApproxBase) else right
            other_side = right if isinstance(left, ApproxBase) else left

            explanation = approx_side._repr_compare(other_side)
        elif type(left) == type(right) and (
            isdatacls(left) or isattrs(left) or isnamedtuple(left)
        ):
            # Only compare field-by-field when both sides are the same class;
            # mixed types fall through to the generic handlers below.
            explanation = _compare_eq_cls(left, right, verbose)
        elif issequence(left) and issequence(right):
            explanation = _compare_eq_sequence(left, right, verbose)
        elif isset(left) and isset(right):
            explanation = _compare_eq_set(left, right, verbose)
        elif isdict(left) and isdict(right):
            explanation = _compare_eq_dict(left, right, verbose)

        # Append a generic element-wise diff for any iterable pair.
        if isiterable(left) and isiterable(right):
            expl = _compare_eq_iterable(left, right, verbose)
            explanation.extend(expl)
    return explanation
def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]:
    """Return the explanation for the diff between text.

    Unless --verbose is used this will skip leading and trailing
    characters which are identical to keep the diff minimal.
    """
    from difflib import ndiff

    explanation: List[str] = []

    if verbose < 1:
        i = 0  # just in case left or right has zero length
        for i in range(min(len(left), len(right))):
            if left[i] != right[i]:
                break
        if i > 42:
            i -= 10  # Provide some context
            explanation = [
                "Skipping %s identical leading characters in diff, use -v to show" % i
            ]
            left = left[i:]
            right = right[i:]
        if len(left) == len(right):
            # NOTE: left[-0] == left[0], so the first iteration compares the
            # *first* characters — preserved quirk of the trailing-skip scan.
            for i in range(len(left)):
                if left[-i] != right[-i]:
                    break
            if i > 42:
                i -= 10  # Provide some context
                explanation += [
                    "Skipping {} identical trailing "
                    "characters in diff, use -v to show".format(i)
                ]
                left = left[:-i]
                right = right[:-i]
    keepends = True
    if left.isspace() or right.isspace():
        # Whitespace-only strings would produce an unreadable diff.
        left = repr(str(left))
        right = repr(str(right))
        explanation += ["Strings contain only whitespace, escaping them using repr()"]
    # "right" is the expected base against which we compare "left",
    # see https://github.com/pytest-dev/pytest/issues/3333
    explanation += [
        line.strip("\n")
        for line in ndiff(right.splitlines(keepends), left.splitlines(keepends))
    ]
    return explanation
def _surrounding_parens_on_own_lines(lines: List[str]) -> None:
"""Move opening/closing parenthesis/bracket to own lines."""
opening = lines[0][:1]
if opening in ["(", "[", "{"]:
lines[0] = " " + lines[0][1:]
lines[:] = [opening] + lines
closing = lines[-1][-1:]
if closing in [")", "]", "}"]:
lines[-1] = lines[-1][:-1] + ","
lines[:] = lines + [closing]
def _compare_eq_iterable(
    left: Iterable[Any], right: Iterable[Any], verbose: int = 0
) -> List[str]:
    """Produce a full ndiff of the pretty-printed forms of two iterables."""
    if verbose <= 0 and not running_on_ci():
        return ["Use -v to get more diff"]
    # dynamic import to speedup pytest
    import difflib

    left_lines = pprint.pformat(left).splitlines()
    right_lines = pprint.pformat(right).splitlines()
    n_left = len(left_lines)
    n_right = len(right_lines)
    if n_left != n_right:
        # Re-format both sides the same way so the diff lines up.
        left_lines = _pformat_dispatch(left).splitlines()
        right_lines = _pformat_dispatch(right).splitlines()
    if n_left > 1 or n_right > 1:
        _surrounding_parens_on_own_lines(left_lines)
        _surrounding_parens_on_own_lines(right_lines)

    result = ["Full diff:"]
    # "right" is the expected base against which we compare "left",
    # see https://github.com/pytest-dev/pytest/issues/3333
    result.extend(line.rstrip() for line in difflib.ndiff(right_lines, left_lines))
    return result
def _compare_eq_sequence(
left: Sequence[Any], right: Sequence[Any], verbose: int = 0
) -> List[str]:
comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)
explanation: List[str] = []
len_left = len(left)
len_right = len(right)
for i in range(min(len_left, len_right)):
if left[i] != right[i]:
if comparing_bytes:
# when comparing bytes, we want to see their ascii representation
# instead of their numeric values (#5260)
# using a slice gives us the ascii representation:
# >>> s = b'foo'
# >>> s[0]
# 102
# >>> s[0:1]
# b'f'
left_value = left[i : i + 1]
right_value = right[i : i + 1]
else:
left_value = left[i]
right_value = right[i]
explanation += [f"At index {i} diff: {left_value!r} != {right_value!r}"]
break
if comparing_bytes:
# when comparing bytes, it doesn't help to show the "sides contain one or more
# items" longer explanation, so skip it
return explanation
len_diff = len_left - len_right
if len_diff:
if len_diff > 0:
dir_with_more = "Left"
extra = saferepr(left[len_right])
else:
len_diff = 0 - len_diff
dir_with_more = "Right"
extra = saferepr(right[len_left])
if len_diff == 1:
explanation += [f"{dir_with_more} contains one more item: {extra}"]
else:
explanation += [
"%s contains %d more items, first extra item: %s"
% (dir_with_more, len_diff, extra)
]
return explanation
def _compare_eq_set(
left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0
) -> List[str]:
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
explanation.append("Extra items in the left set:")
for item in diff_left:
explanation.append(saferepr(item))
if diff_right:
explanation.append("Extra items in the right set:")
for item in diff_right:
explanation.append(saferepr(item))
return explanation
def _compare_eq_dict(
left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0
) -> List[str]:
explanation: List[str] = []
set_left = set(left)
set_right = set(right)
common = set_left.intersection(set_right)
same = {k: left[k] for k in common if left[k] == right[k]}
if same and verbose < 2:
explanation += ["Omitting %s identical items, use -vv to show" % len(same)]
elif same:
explanation += ["Common items:"]
explanation += pprint.pformat(same).splitlines()
diff = {k for k in common if left[k] != right[k]}
if diff:
explanation += ["Differing items:"]
for k in diff:
explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})]
extra_left = set_left - set_right
len_extra_left = len(extra_left)
if len_extra_left:
explanation.append(
"Left contains %d more item%s:"
% (len_extra_left, "" if len_extra_left == 1 else "s")
)
explanation.extend(
pprint.pformat({k: left[k] for k in extra_left}).splitlines()
)
extra_right = set_right - set_left
len_extra_right = len(extra_right)
if len_extra_right:
explanation.append(
"Right contains %d more item%s:"
% (len_extra_right, "" if len_extra_right == 1 else "s")
)
explanation.extend(
pprint.pformat({k: right[k] for k in extra_right}).splitlines()
)
return explanation
def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]:
    """Explain a failed ``==`` between two dataclass/attrs/namedtuple values.

    The caller guarantees that ``left`` and ``right`` share a type.  Returns
    an empty list when the class overrides ``__eq__``, since a
    field-by-field breakdown would not reflect the custom comparison.
    """
    if not has_default_eq(left):
        return []
    # Collect the fields that participate in equality for this flavour of
    # record type.
    if isdatacls(left):
        all_fields = left.__dataclass_fields__
        fields_to_check = [field for field, info in all_fields.items() if info.compare]
    elif isattrs(left):
        all_fields = left.__attrs_attrs__
        fields_to_check = [field.name for field in all_fields if getattr(field, "eq")]
    elif isnamedtuple(left):
        fields_to_check = left._fields
    else:
        assert False  # unreachable: dispatch in the caller covers all cases

    indent = " "
    same = []
    diff = []
    # Partition fields into matching and differing ones.
    for field in fields_to_check:
        if getattr(left, field) == getattr(right, field):
            same.append(field)
        else:
            diff.append(field)

    explanation = []
    if same or diff:
        explanation += [""]
    if same and verbose < 2:
        explanation.append("Omitting %s identical items, use -vv to show" % len(same))
    elif same:
        explanation += ["Matching attributes:"]
        explanation += pprint.pformat(same).splitlines()
    if diff:
        explanation += ["Differing attributes:"]
        explanation += pprint.pformat(diff).splitlines()
        # Recurse into each differing attribute so nested records get the
        # same rich explanation.
        for field in diff:
            field_left = getattr(left, field)
            field_right = getattr(right, field)
            explanation += [
                "",
                "Drill down into differing attribute %s:" % field,
                ("%s%s: %r != %r") % (indent, field, field_left, field_right),
            ]
            explanation += [
                indent + line
                for line in _compare_eq_any(field_left, field_right, verbose)
            ]
    return explanation
def _notin_text(term: str, text: str, verbose: int = 0) -> List[str]:
    """Explain a failed ``term not in text`` assertion.

    Diffs *text* against a copy of itself with the first occurrence of
    *term* removed, so the output points at where the term appears.
    """
    where = text.find(term)
    without_term = text[:where] + text[where + len(term) :]
    result = ["%s is contained here:" % saferepr(term, maxsize=42)]
    for line in _diff_text(text, without_term, verbose):
        # Drop the trimming notes and the "removed" side of the diff;
        # re-indent the "+" lines so the marker lines up under the text.
        if line.startswith(("Skipping", "- ")):
            continue
        if line.startswith("+ "):
            result.append(" " + line[2:])
        else:
            result.append(line)
    return result
def running_on_ci() -> bool:
    """Check if we're currently running on a CI system."""
    # Both variables are conventions set by common CI providers.
    return "CI" in os.environ or "BUILD_NUMBER" in os.environ
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: building_zone_names.proto
# NOTE(review): legacy (pre protobuf-3.8 style) generator output.  To change
# the schema, edit building_zone_names.proto and re-run protoc instead of
# editing this file by hand.
import sys
# Python-2 compatibility shim: descriptor byte strings pass through on py2
# and are re-encoded to latin-1 bytes on py3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# File descriptor carrying the full serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='building_zone_names.proto',
  package='building_zone_names',
  syntax='proto3',
  serialized_options=_b('P\001'),
  serialized_pb=_b('\n\x19\x62uilding_zone_names.proto\x12\x13\x62uilding_zone_names\"\x11\n\x0f\x42uildingRequest\"\x1f\n\x0bZoneRequest\x12\x10\n\x08\x62uilding\x18\x01 \x01(\t\"\x19\n\tNamePoint\x12\x0c\n\x04name\x18\x01 \x01(\t\"6\n\x05Reply\x12-\n\x05names\x18\x01 \x03(\x0b\x32\x1e.building_zone_names.NamePoint2\xb3\x01\n\x11\x42uildingZoneNames\x12R\n\x0cGetBuildings\x12$.building_zone_names.BuildingRequest\x1a\x1a.building_zone_names.Reply\"\x00\x12J\n\x08GetZones\x12 .building_zone_names.ZoneRequest\x1a\x1a.building_zone_names.Reply\"\x00\x42\x02P\x01\x62\x06proto3')
)


# message BuildingRequest {}  (empty request)
_BUILDINGREQUEST = _descriptor.Descriptor(
  name='BuildingRequest',
  full_name='building_zone_names.BuildingRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=50,
  serialized_end=67,
)


# message ZoneRequest { string building = 1; }
_ZONEREQUEST = _descriptor.Descriptor(
  name='ZoneRequest',
  full_name='building_zone_names.ZoneRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='building', full_name='building_zone_names.ZoneRequest.building', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=69,
  serialized_end=100,
)


# message NamePoint { string name = 1; }
_NAMEPOINT = _descriptor.Descriptor(
  name='NamePoint',
  full_name='building_zone_names.NamePoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='building_zone_names.NamePoint.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=102,
  serialized_end=127,
)


# message Reply { repeated NamePoint names = 1; }
_REPLY = _descriptor.Descriptor(
  name='Reply',
  full_name='building_zone_names.Reply',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='names', full_name='building_zone_names.Reply.names', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=129,
  serialized_end=183,
)

# Wire up the cross-message reference and register all types with the
# symbol database.
_REPLY.fields_by_name['names'].message_type = _NAMEPOINT
DESCRIPTOR.message_types_by_name['BuildingRequest'] = _BUILDINGREQUEST
DESCRIPTOR.message_types_by_name['ZoneRequest'] = _ZONEREQUEST
DESCRIPTOR.message_types_by_name['NamePoint'] = _NAMEPOINT
DESCRIPTOR.message_types_by_name['Reply'] = _REPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes generated via the reflection metaclass.
BuildingRequest = _reflection.GeneratedProtocolMessageType('BuildingRequest', (_message.Message,), dict(
  DESCRIPTOR = _BUILDINGREQUEST,
  __module__ = 'building_zone_names_pb2'
  # @@protoc_insertion_point(class_scope:building_zone_names.BuildingRequest)
  ))
_sym_db.RegisterMessage(BuildingRequest)

ZoneRequest = _reflection.GeneratedProtocolMessageType('ZoneRequest', (_message.Message,), dict(
  DESCRIPTOR = _ZONEREQUEST,
  __module__ = 'building_zone_names_pb2'
  # @@protoc_insertion_point(class_scope:building_zone_names.ZoneRequest)
  ))
_sym_db.RegisterMessage(ZoneRequest)

NamePoint = _reflection.GeneratedProtocolMessageType('NamePoint', (_message.Message,), dict(
  DESCRIPTOR = _NAMEPOINT,
  __module__ = 'building_zone_names_pb2'
  # @@protoc_insertion_point(class_scope:building_zone_names.NamePoint)
  ))
_sym_db.RegisterMessage(NamePoint)

Reply = _reflection.GeneratedProtocolMessageType('Reply', (_message.Message,), dict(
  DESCRIPTOR = _REPLY,
  __module__ = 'building_zone_names_pb2'
  # @@protoc_insertion_point(class_scope:building_zone_names.Reply)
  ))
_sym_db.RegisterMessage(Reply)


DESCRIPTOR._options = None

# service BuildingZoneNames with two unary RPCs, both returning Reply.
_BUILDINGZONENAMES = _descriptor.ServiceDescriptor(
  name='BuildingZoneNames',
  full_name='building_zone_names.BuildingZoneNames',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=186,
  serialized_end=365,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetBuildings',
    full_name='building_zone_names.BuildingZoneNames.GetBuildings',
    index=0,
    containing_service=None,
    input_type=_BUILDINGREQUEST,
    output_type=_REPLY,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='GetZones',
    full_name='building_zone_names.BuildingZoneNames.GetZones',
    index=1,
    containing_service=None,
    input_type=_ZONEREQUEST,
    output_type=_REPLY,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_BUILDINGZONENAMES)

DESCRIPTOR.services_by_name['BuildingZoneNames'] = _BUILDINGZONENAMES

# @@protoc_insertion_point(module_scope)
|
# Application entry point: construct the GUI and hand control to its
# main loop.
from gui import GUI

app = GUI()
app.run()
|
import numpy as np
def WithinMP(x, rho, Rss=1.42, Alpha=0.5):
    '''
    Test whether MSM coordinates lie inside the model magnetopause.

    Inputs:
        x: Position(s) along the x MSM direction.
        rho: Cylindrical distance(s) sqrt(y**2 + z**2) in MSM.
        Rss: Distance of the subsolar point on the magnetopause.
        Alpha: Magnetopause flaring parameter.

    Returns:
        boolean(s) where True means that the position is within the
        magnetopause
    '''
    # Boundary radius grows with angle from the subsolar (+x) direction
    # according to the flaring exponent Alpha.
    zenith = np.arctan2(rho, x)
    radial = np.sqrt(x**2 + rho**2)
    boundary = Rss * (2.0 / (1.0 + np.cos(zenith)))**Alpha
    return boundary > radial
|
# -*- coding: utf-8 -*-
# Example: query trading days and warrant data through the futu OpenAPI.
# Requires a running FutuOpenD gateway listening on localhost:11111.
from futu import *

quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)

# Trading days for the Hong Kong market over a fixed date range.
print(quote_ctx.get_trading_days(Market.HK, start='2018-02-01', end='2018-02-05'))

from futu.quote.quote_get_warrant import Request

# Build a warrant filter: sort by code ascending, bear/call warrants only,
# restricted to the listed issuers; then query warrants on HK.00700.
req = Request()
req.sort_field = SortField.CODE
req.ascend = True
req.type_list = [WrtType.BEAR, WrtType.CALL]
req.issuer_list = [Issuer.CS, Issuer.CT, Issuer.EA]
print(quote_ctx.get_warrant("HK.00700", req))

# Close the connection so the gateway can release the session.
quote_ctx.close()
|
from unittest import TestCase
from chatterbot.adapters.storage import JsonFileStorageAdapter
from chatterbot.conversation import Statement, Response
class JsonAdapterTestCase(TestCase):
    """Base test case: creates a throwaway JSON database for each test."""

    def setUp(self):
        """
        Instantiate the adapter.
        """
        from random import randint

        # Generate a random name for the database
        database_name = str(randint(0, 9000))
        self.adapter = JsonFileStorageAdapter(database=database_name)

    def tearDown(self):
        """
        Remove the test database.
        """
        self.adapter.drop()
class JsonFileStorageAdapterTestCase(JsonAdapterTestCase):
    """Tests for the basic CRUD behaviour of JsonFileStorageAdapter."""

    def test_count_returns_zero(self):
        """
        The count method should return a value of 0
        when nothing has been saved to the database.
        """
        self.assertEqual(self.adapter.count(), 0)

    def test_count_returns_value(self):
        """
        The count method should return a value of 1
        when one item has been saved to the database.
        """
        statement = Statement("Test statement")
        self.adapter.update(statement)
        self.assertEqual(self.adapter.count(), 1)

    def test_statement_not_found(self):
        """
        Test that None is returned by the find method
        when a matching statement is not found.
        """
        self.assertEqual(self.adapter.find("Non-existant"), None)

    def test_statement_found(self):
        """
        Test that a matching statement is returned
        when it exists in the database.
        """
        statement = Statement("New statement")
        self.adapter.update(statement)

        found_statement = self.adapter.find("New statement")
        self.assertNotEqual(found_statement, None)
        self.assertEqual(found_statement.text, statement.text)

    def test_update_adds_new_statement(self):
        """Updating with an unseen statement should insert it."""
        statement = Statement("New statement")
        self.adapter.update(statement)

        statement_found = self.adapter.find("New statement")
        self.assertNotEqual(statement_found, None)
        self.assertEqual(statement_found.text, statement.text)

    def test_update_modifies_existing_statement(self):
        """Updating an existing statement should persist its new responses."""
        statement = Statement("New statement")
        self.adapter.update(statement)

        # Check the initial values
        found_statement = self.adapter.find(statement.text)
        self.assertEqual(
            len(found_statement.in_response_to), 0
        )

        # Update the statement value
        statement.add_response(
            Response("New response")
        )
        self.adapter.update(statement)

        # Check that the values have changed
        found_statement = self.adapter.find(statement.text)
        self.assertEqual(
            len(found_statement.in_response_to), 1
        )

    def test_get_random_returns_statement(self):
        """get_random should return a stored statement."""
        statement = Statement("New statement")
        self.adapter.update(statement)

        random_statement = self.adapter.get_random()
        self.assertEqual(random_statement.text, statement.text)

    def test_find_returns_nested_responses(self):
        """find should return the statement together with its responses."""
        response_list = [
            Response("Yes"),
            Response("No")
        ]
        statement = Statement(
            "Do you like this?",
            in_response_to=response_list
        )
        self.adapter.update(statement)

        result = self.adapter.find(statement.text)
        self.assertIn("Yes", result.in_response_to)
        self.assertIn("No", result.in_response_to)

    def test_multiple_responses_added_on_update(self):
        """All responses supplied at creation should be saved on update."""
        statement = Statement(
            "You are welcome.",
            in_response_to=[
                Response("Thank you."),
                Response("Thanks.")
            ]
        )
        self.adapter.update(statement)
        result = self.adapter.find(statement.text)

        self.assertEqual(len(result.in_response_to), 2)
        self.assertIn(statement.in_response_to[0], result.in_response_to)
        self.assertIn(statement.in_response_to[1], result.in_response_to)

    def test_update_saves_statement_with_multiple_responses(self):
        """A statement with several responses should round-trip intact."""
        statement = Statement(
            "You are welcome.",
            in_response_to=[
                Response("Thank you."),
                Response("Thanks."),
            ]
        )
        self.adapter.update(statement)
        response = self.adapter.find(statement.text)

        self.assertEqual(len(response.in_response_to), 2)

    def test_getting_and_updating_statement(self):
        """Adding the same response twice should increment its occurrence
        count rather than store a duplicate entry."""
        statement = Statement("Hi")
        self.adapter.update(statement)

        statement.add_response(Response("Hello"))
        statement.add_response(Response("Hello"))
        self.adapter.update(statement)

        response = self.adapter.find(statement.text)

        self.assertEqual(len(response.in_response_to), 1)
        self.assertEqual(response.in_response_to[0].occurrence, 2)

    def test_deserialize_responses(self):
        """deserialize_responses should yield one result per input dict."""
        response_list = [
            {"text": "Test", "occurrence": 3},
            {"text": "Testing", "occurrence": 1},
        ]
        results = self.adapter.deserialize_responses(response_list)

        self.assertEqual(len(results), 2)

    def test_remove(self):
        """remove should delete the statement from the database."""
        text = "Sometimes you have to run before you can walk."
        statement = Statement(text)
        self.adapter.update(statement)
        self.adapter.remove(statement.text)
        result = self.adapter.find(text)

        self.assertIsNone(result)

    def test_remove_response(self):
        """After removing a statement, filtering by one of its response
        texts should find nothing."""
        text = "Sometimes you have to run before you can walk."
        statement = Statement(
            "A test flight is not recommended at this design phase.",
            in_response_to=[Response(text)]
        )
        self.adapter.update(statement)
        self.adapter.remove(statement.text)
        results = self.adapter.filter(in_response_to__contains=text)

        self.assertEqual(results, [])

    def test_get_response_statements(self):
        """
        Test that we are able to get a list of only statements
        that are known to be in response to another statement.
        """
        statement_list = [
            Statement("What... is your quest?"),
            Statement("This is a phone."),
            Statement("A what?", in_response_to=[Response("This is a phone.")]),
            Statement("A phone.", in_response_to=[Response("A what?")])
        ]
        for statement in statement_list:
            self.adapter.update(statement)

        responses = self.adapter.get_response_statements()

        self.assertEqual(len(responses), 2)
        self.assertIn("This is a phone.", responses)
        self.assertIn("A what?", responses)
class JsonFileStorageAdapterFilterTestCase(JsonAdapterTestCase):
    """Tests for the filter method of JsonFileStorageAdapter."""

    def setUp(self):
        """Create two chained statements used across the filter tests."""
        super(JsonFileStorageAdapterFilterTestCase, self).setUp()

        self.statement1 = Statement(
            "Testing...",
            in_response_to=[
                Response("Why are you counting?")
            ]
        )
        self.statement2 = Statement(
            "Testing one, two, three.",
            in_response_to=[
                Response("Testing...")
            ]
        )

    def test_filter_text_no_matches(self):
        """Filtering on text that was never stored returns nothing."""
        self.adapter.update(self.statement1)
        results = self.adapter.filter(text="Howdy")

        self.assertEqual(len(results), 0)

    def test_filter_in_response_to_no_matches(self):
        """Filtering on an unmatched response list returns nothing."""
        self.adapter.update(self.statement1)

        results = self.adapter.filter(
            in_response_to=[Response("Maybe")]
        )
        self.assertEqual(len(results), 0)

    def test_filter_equal_results(self):
        """Filtering on an empty response list matches all statements
        that have no responses."""
        statement1 = Statement(
            "Testing...",
            in_response_to=[]
        )
        statement2 = Statement(
            "Testing one, two, three.",
            in_response_to=[]
        )
        self.adapter.update(statement1)
        self.adapter.update(statement2)

        results = self.adapter.filter(in_response_to=[])
        self.assertEqual(len(results), 2)
        self.assertIn(statement1, results)
        self.assertIn(statement2, results)

    def test_filter_contains_result(self):
        """in_response_to__contains matches statements holding the text."""
        self.adapter.update(self.statement1)
        self.adapter.update(self.statement2)

        results = self.adapter.filter(
            in_response_to__contains="Why are you counting?"
        )
        self.assertEqual(len(results), 1)
        self.assertIn(self.statement1, results)

    def test_filter_contains_no_result(self):
        """in_response_to__contains with unseen text returns an empty list."""
        self.adapter.update(self.statement1)

        results = self.adapter.filter(
            in_response_to__contains="How do you do?"
        )
        self.assertEqual(results, [])

    def test_filter_multiple_parameters(self):
        """Several filter parameters combine conjunctively."""
        self.adapter.update(self.statement1)
        self.adapter.update(self.statement2)

        results = self.adapter.filter(
            text="Testing...",
            in_response_to__contains="Why are you counting?"
        )

        self.assertEqual(len(results), 1)
        self.assertIn(self.statement1, results)

    def test_filter_multiple_parameters_no_results(self):
        """Conjunctive parameters with no common match return nothing."""
        self.adapter.update(self.statement1)
        self.adapter.update(self.statement2)

        results = self.adapter.filter(
            text="Test",
            in_response_to__contains="Not an existing response."
        )

        self.assertEqual(len(results), 0)

    def test_filter_no_parameters(self):
        """
        If no parameters are passed to the filter,
        then all statements should be returned.
        """
        statement1 = Statement("Testing...")
        statement2 = Statement("Testing one, two, three.")
        self.adapter.update(statement1)
        self.adapter.update(statement2)

        results = self.adapter.filter()

        self.assertEqual(len(results), 2)

    def test_filter_returns_statement_with_multiple_responses(self):
        """A filtered statement keeps its full response list."""
        statement = Statement(
            "You are welcome.",
            in_response_to=[
                Response("Thanks."),
                Response("Thank you.")
            ]
        )
        self.adapter.update(statement)
        response = self.adapter.filter(
            in_response_to__contains="Thanks."
        )

        # Get the first response
        response = response[0]

        self.assertEqual(len(response.in_response_to), 2)

    def test_response_list_in_results(self):
        """
        If a statement with response values is found using
        the filter method, they should be returned as
        response objects.
        """
        statement = Statement(
            "The first is to help yourself, the second is to help others.",
            in_response_to=[
                Response("Why do people have two hands?")
            ]
        )
        self.adapter.update(statement)
        found = self.adapter.filter(text=statement.text)

        self.assertEqual(len(found[0].in_response_to), 1)
        self.assertEqual(type(found[0].in_response_to[0]), Response)
class ReadOnlyJsonFileStorageAdapterTestCase(JsonAdapterTestCase):
    """Tests that an adapter flagged read_only performs no writes."""

    def test_update_does_not_add_new_statement(self):
        """With read_only set, update must not insert a new statement."""
        self.adapter.read_only = True

        statement = Statement("New statement")
        self.adapter.update(statement)

        statement_found = self.adapter.find("New statement")
        self.assertEqual(statement_found, None)

    def test_update_does_not_modify_existing_statement(self):
        """With read_only set, update must not change stored data."""
        statement = Statement("New statement")
        self.adapter.update(statement)

        self.adapter.read_only = True

        statement.add_response(
            Response("New response")
        )

        self.adapter.update(statement)

        statement_found = self.adapter.find("New statement")
        self.assertEqual(statement_found.text, statement.text)
        self.assertEqual(
            len(statement_found.in_response_to), 0
        )
|
import torch
from model_save import *
import torchvision
from torch import nn

# Method 1 -> load a model that was saved with save-method 1
# (torch.save(model): the whole module, architecture included).
model = torch.load("vgg16_method1.pth")
# print(model)

# Method 2: load a model saved as a state_dict only — rebuild the
# architecture first, then restore the weights into it.
vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load("vgg16_method2.pth"))
# model = torch.load("vgg16_method2.pth")
# print(vgg16)

# Pitfall 1: loading a whole model saved with method 1 requires the class
# definition to be importable at load time (provided here by
# `from model_save import *` rather than redefining it locally).
# class Tudui(nn.Module):
#     def __init__(self):
#         super(Tudui, self).__init__()
#         self.conv1 = nn.Conv2d(3, 64, kernel_size=3)
#
#     def forward(self, x):
#         x = self.conv1(x)
#         return x

model = torch.load('tudui_method1.pth')
print(model)
|
# Deprecated `scanpy.api` shim: re-exports the top-level scanpy namespace
# and warns on import.
import warnings

warnings.warn(
    "\n\n"
    "In a future version of Scanpy, `scanpy.api` will be removed.\n"
    "Simply use `import scanpy as sc` and `import scanpy.external as sce` instead.\n",
    FutureWarning
)

from anndata import AnnData
from ..neighbors import Neighbors
from anndata import read as read_h5ad
from anndata import read_csv, read_excel, read_hdf, read_loom, read_mtx, read_text, read_umi_tools
from .. import __version__
from . import tl
from . import pl
from . import pp
from ..readwrite import read, read_10x_h5, read_10x_mtx, write, read_params, write_params
from . import datasets
from . import export_to
from . import logging
from . import queries
from .. import plotting

# unfortunately, we cannot put this here as long as we have simple global
# variables in settings... they couldn't be set in this case...
# the main drawback is that we have to import set_figure_params
# to show in the docs for that reason...
# it would be nice to make the simple data types "properties of the
# module"... putting setters and getters for all of them wouldn't be very nice
from .._settings import settings
# for now - or maybe as the permanently favored solution - put the single function here
# from ..settings import set_figure_params
# Re-exported so `sc.set_figure_params(...)` works as a plain function.
set_figure_params = settings.set_figure_params

# some stuff that is not actually documented...
from .. import utils

import sys
# Tag re-exported objects with their module of origin for the docs, then
# drop the helper import again so it does not leak into the namespace.
utils.annotate_doc_types(sys.modules[__name__], 'scanpy')
del sys
__doc__ = """\
Global API (deprecated)
=======================
.. warning::
.. deprecated:: 1.3.7
Use the top level module instead: `import scanpy as sc`.
For the deprecated high-level API documented on this page, use `import scanpy.api as sc`.
Preprocessing: PP
------------------
Filtering of highly-variable genes, batch-effect correction, per-cell normalization, preprocessing recipes.
Basic Preprocessing
~~~~~~~~~~~~~~~~~~~
For visual quality control, see :func:`~scanpy.api.pl.highest_expr_genes` and
:func:`~scanpy.api.pl.filter_genes_dispersion` in the :doc:`plotting API
<plotting>`.
.. autosummary::
:toctree: .
pp.calculate_qc_metrics
pp.filter_cells
pp.filter_genes
pp.highly_variable_genes
pp.filter_genes_dispersion
pp.log1p
pp.pca
pp.normalize_per_cell
pp.regress_out
pp.scale
pp.subsample
pp.downsample_counts
Recipes
~~~~~~~
.. autosummary::
:toctree: .
pp.recipe_zheng17
pp.recipe_weinreb17
pp.recipe_seurat
Batch effect correction
~~~~~~~~~~~~~~~~~~~~~~~
Note that a simple batch correction method is available via :func:`pp.regress_out`.
``pp.bbknn`` is just an alias for :func:`bbknn.bbknn`. Refer to it for the documentation.
.. autosummary::
:toctree: .
pp.bbknn
pp.mnn_correct
Imputation
~~~~~~~~~~
Note that the fundamental limitations of imputation are still under `debate
<https://github.com/theislab/scanpy/issues/189>`__.
.. autosummary::
:toctree: .
pp.dca
pp.magic
Neighbors
~~~~~~~~~
.. autosummary::
:toctree: .
pp.neighbors
Tools: TL
----------
Embeddings
~~~~~~~~~~
.. autosummary::
:toctree: .
tl.pca
tl.tsne
tl.umap
tl.draw_graph
tl.diffmap
tl.phate
Clustering and trajectory inference
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: .
tl.leiden
tl.louvain
tl.dpt
tl.paga
Marker genes
~~~~~~~~~~~~
.. autosummary::
:toctree: .
tl.rank_genes_groups
Gene scores, Cell cycle
~~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: .
tl.score_genes
tl.score_genes_cell_cycle
tl.sandbag
tl.cyclone
Simulations
~~~~~~~~~~~
.. autosummary::
:toctree: .
tl.sim
Plotting: PL
------------
The plotting :doc:`plotting API <plotting>` largely parallels the ``tl.*`` and
``pp.*`` functions. For most tools and for some preprocessing functions, you'll
find a plotting function with the same name.
Reading
-------
*Note:* For reading annotation use :ref:`pandas.read_โฆ <pandas:io>` and add
it to your `AnnData` object. The following read functions are intended for
the numeric data in the data matrix `X`.
Read common file formats using
.. autosummary::
:toctree: .
read
Read 10x formatted hdf5 files and directories containing `.mtx` files using
.. autosummary::
:toctree: .
read_10x_h5
read_10x_mtx
Read other formats using functions borrowed from :mod:`anndata`
.. autosummary::
:toctree: .
read_h5ad
read_csv
read_excel
read_hdf
read_loom
read_mtx
read_text
read_umi_tools
Queries
-------
.. autosummary::
:toctree: .
queries.mitochondrial_genes
Classes
-------
:class:`~anndata.AnnData` is reexported from :mod:`anndata`.
Represent data as a neighborhood structure, usually a knn graph.
.. autosummary::
:toctree: .
Neighbors
.. _settings:
Settings
--------
A convenience function for setting some default ``matplotlib.rcParams`` and a
high-resolution jupyter display backend useful for use in notebooks.
.. autosummary::
:toctree: .
set_figure_params
Influence the global behavior of plotting functions. In non-interactive scripts,
you'd usually want to set :class:`settings.autoshow` to ``False``.
============================================== ===================================
:class:`settings.autoshow` Automatically show figures (default: ``True``).
:class:`settings.autosave` Automatically save figures (default: ``False``).
============================================== ===================================
The default directories for saving figures and caching files.
============================================== ===================================
:class:`settings.figdir` Directory for saving figures (default: `Path('figures')`).
:class:`settings.cachedir` Directory for cache files (default: `Path('cache')`).
:class:`settings.datasetdir` Directory for example datasets (default: `Path('data')`).
============================================== ===================================
The verbosity of logging output, where verbosity levels have the following
meaning: 0='error', 1='warning', 2='info', 3='hint', 4=more details, 5=even more
details, etc.
============================================== ===================================
:class:`settings.verbosity` Verbosity level (default: 1).
============================================== ===================================
Print versions of packages that might influence numerical results.
.. autosummary::
:toctree: .
logging.print_versions
Datasets
--------
.. autosummary::
:toctree: .
datasets.blobs
datasets.krumsiek11
datasets.moignard15
datasets.pbmc3k
datasets.pbmc68k_reduced
datasets.paul15
datasets.toggleswitch
Exporting
---------
.. autosummary::
:toctree: .
export_to.spring_project
"""
|
import model
import theano_funcs
import utils
from iter_funcs import get_batch_idx
# credit to @fulhack: https://twitter.com/fulhack/status/721842480140967936
import seaborn # NOQA - never used, but improves matplotlib's style
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
from sklearn.decomposition import PCA
from os.path import join
def plot(Z1, y1, Z2, y2, filename=None, title=None):
    """Scatter-plot two 2-D point sets coloured by digit label.

    Z1/y1 and Z2/y2 are 2-D coordinate arrays with matching digit labels
    (0-9).  Z1 points are drawn with 'o' markers and Z2 points with 'x'
    markers in the same per-digit colour; the figure is saved to
    *filename* ('plot.png' when omitted).
    """
    # One fixed colour per digit so both point sets share a palette.
    digit_colors = [
        'red', 'green', 'blue', 'cyan', 'magenta',
        'yellow', 'black', 'white', 'orange', 'gray',
    ]
    legend, labels = [], []
    for i in range(0, 10):
        idx1 = y1 == i
        idx2 = y2 == i
        pc1 = plt.scatter(
            Z1[idx1, 0], Z1[idx1, 1],
            marker='o', color=digit_colors[i],
        )
        legend.append(pc1)
        labels.append('%d' % i)
        pc2 = plt.scatter(
            Z2[idx2, 0], Z2[idx2, 1],
            marker='x', color=digit_colors[i],
        )
        legend.append(pc2)
        labels.append('%d' % i)

    # only plot digit colors to avoid cluttering the legend
    plt.legend(legend[::2], labels[::2], loc='upper left', ncol=1)
    if title is not None:
        plt.title(title)
    if filename is None:
        filename = 'plot.png'
    plt.savefig(filename, bbox_inches='tight')
# always a good sanity check
def plot_pca():
    """Project MNIST train/test data onto the first two principal
    components and save the resulting scatter plot to ``pca.png``.

    The PCA is fit on the training split only; the test split is
    transformed with the fitted components.
    """
    print('loading data')
    X_train, y_train, X_test, y_test = utils.load_mnist()
    pca = PCA(n_components=2)
    print('transforming training data')
    Z_train = pca.fit_transform(X_train)
    print('transforming test data')
    Z_test = pca.transform(X_test)
    # fix: title previously misspelled "principle components"
    plot(Z_train, y_train, Z_test, y_test,
         filename='pca.png', title='projected onto principal components')
def plot_autoencoder(weightsfile):
    """Encode MNIST train/test images with the trained encoder and plot
    the resulting 2-D latent codes (saved to ``adversarial_train_val.png``).

    Parameters
    ----------
    weightsfile : str
        Path to the pickled weights loaded into the model.
    """
    print('building model')
    layers = model.build_model()
    batch_size = 128

    print('compiling theano function')
    encoder_func = theano_funcs.create_encoder_func(layers)

    print('loading weights from %s' % (weightsfile))
    model.load_weights([
        layers['l_decoder_out'],
        layers['l_discriminator_out'],
    ], weightsfile)

    print('loading data')
    X_train, y_train, X_test, y_test = utils.load_mnist()

    def encode_in_batches(X):
        # run the encoder over X one minibatch at a time and stack the codes
        codes = []
        for batch_idx in get_batch_idx(X.shape[0], batch_size):
            codes.append(encoder_func(X[batch_idx]))
        return np.vstack(codes)

    print('transforming training data')
    Z_train = encode_in_batches(X_train)
    print('transforming test data')
    Z_test = encode_in_batches(X_test)

    plot(Z_train, y_train, Z_test, y_test,
         filename='adversarial_train_val.png',
         title='projected onto latent space of autoencoder')
def plot_latent_space(weightsfile):
    """Decode a regular grid of latent points and save the image mosaic.

    Samples a 21x21 grid over the 2-D latent square [-2, 2] x [-2, 2],
    decodes each point to a 28x28 image, and writes the mosaic to
    ``latent_train_val.png``.
    """
    print('building model')
    layers = model.build_model()
    batch_size = 128
    decoder_func = theano_funcs.create_decoder_func(layers)
    print('loading weights from %s' % (weightsfile))
    model.load_weights([
        layers['l_decoder_out'],
        layers['l_discriminator_out'],
    ], weightsfile)
    # regularly-spaced grid of points sampled from p(z)
    # mgrid over [2, -2.2) step -0.2 gives 21 values per axis -> 441 points,
    # matching the 21x21 ImageGrid below; [:, ::-1] swaps to (x, y) order.
    Z = np.mgrid[2:-2.2:-0.2, -2:2.2:0.2].reshape(2, -1).T[:, ::-1].astype(np.float32)
    reconstructions = []
    print('generating samples')
    # decode the grid in minibatches to bound memory use
    for idx in get_batch_idx(Z.shape[0], batch_size):
        Z_batch = Z[idx]
        X_batch = decoder_func(Z_batch)
        reconstructions.append(X_batch)
    X = np.vstack(reconstructions)
    X = X.reshape(X.shape[0], 28, 28)  # flatten vectors back to images
    fig = plt.figure(1, (12., 12.))
    # hide the outer axes; only the image grid should be visible
    ax1 = plt.axes(frameon=False)
    ax1.get_xaxis().set_visible(False)
    ax1.get_yaxis().set_visible(False)
    plt.title('samples generated from latent space of autoencoder')
    grid = ImageGrid(
        fig, 111, nrows_ncols=(21, 21),
        share_all=True)
    print('plotting latent space')
    for i, x in enumerate(X):
        # assumes decoder output is in [0, 1] -- TODO confirm
        img = (x * 255).astype(np.uint8)
        grid[i].imshow(img, cmap='Greys_r')
        grid[i].get_xaxis().set_visible(False)
        grid[i].get_yaxis().set_visible(False)
        grid[i].set_frame_on(False)
    plt.savefig('latent_train_val.png', bbox_inches='tight')
if __name__ == '__main__':
    # Entry point: only the latent-space mosaic is generated by default;
    # uncomment the calls below to regenerate the other figures.
    weightsfile = join('weights', 'weights_train_val.pickle')
    #plot_autoencoder(weightsfile)
    #plot_pca()
    plot_latent_space(weightsfile)
|
import tensorflow as tf
from tensorflow.python.framework import ops
import sys, os
# Make the directory containing this file importable, then load the
# compiled custom neighbor-query TensorFlow op library (.so) from it.
base_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(base_dir)
nnquery_module = tf.load_op_library(os.path.join(base_dir, 'tf_nnquery_so.so'))
def build_sphere_neighbor(database,
                          query,
                          radius=0.1,
                          dilation_rate=None,
                          nnsample=100):
    '''Range-search neighbors of query points within a database point cloud.

    Input:
        database: (batch, npoint, 3+x) float32 array; only xyz is used
        query: (batch, mpoint, 3) float32 array, query points
        radius: float32, range search radius
        dilation_rate: float32, optional multiplier applied to radius
        nnsample: int32, maximum number of neighbors to be sampled
    Output:
        nn_index: (batch, mpoint, nnsample) int32 array, neighbor and filter bin indices
        nn_count: (batch, mpoint) int32 array, number of neighbors
        nn_dist(optional): (batch, mpoint, nnsample) float32, sqrt distance array
    '''
    # drop any extra per-point features; the op only needs xyz
    database = database[:, :, :3]
    query = query[:, :, :3]
    if dilation_rate is not None:
        radius = dilation_rate * radius
    return nnquery_module.build_sphere_neighbor(database, query, radius, nnsample)
ops.NoGradient('BuildSphereNeighbor')
def build_cube_neighbor(database,
                        query,
                        length=0.1,
                        dilation_rate=None,
                        nnsample=100,
                        gridsize=3):
    '''Cube-range search of neighbors of query points within a database cloud.

    Input:
        database: (batch, npoint, 3) float32 array; only xyz is used
        query: (batch, mpoint, 3) float32 array, query points
        length: float32, cube search length
        dilation_rate: float32, optional multiplier applied to length
        nnsample: int32, maximum number of neighbors to be sampled
        gridsize: int32, cubical kernel size
    Output:
        nn_index: (batch, mpoint, nnsample, 2) int32 array, neighbor and filter bin indices
        nn_count: (batch, mpoint) int32 array, number of neighbors
    '''
    # drop any extra per-point features; the op only needs xyz
    database = database[:, :, :3]
    query = query[:, :, :3]
    if dilation_rate is not None:
        length = dilation_rate * length
    return nnquery_module.build_cube_neighbor(database, query, length, nnsample, gridsize)
ops.NoGradient('BuildCubeNeighbor')
|
from typing import List, Optional, Tuple

import numpy as np
import pandas as pd
import seaborn as sns
sns.set_style("darkgrid")
def space_sep_upper(column_name: Optional[str]) -> Optional[str]:
    """
    Separates strings at underscores into headings.
    Used to generate labels from logging names.

    Parameters
    ----------
    column_name : str, optional
        Logging name such as ``"global_step"``; may be None.

    Returns
    -------
    str or None
        Title-cased, space-separated heading (e.g. ``"Global Step"``),
        or None when ``column_name`` is None.
    """
    # fix: annotation previously claimed `-> str` although None is
    # returned for a None input; now Optional on both sides.
    if column_name is None:
        return None
    return column_name.title().replace("_", " ")
def generate_global_step(
    data: pd.DataFrame,
    x_column: str = "global_step",
    x_label_columns: List[str] = ["episode", "step"],
) -> Tuple[pd.DataFrame, pd.DataFrame, str, List[str]]:
    """
    Add a global_step column which enumerates all steps over all episodes.

    Returns the altered data and a frame mapping between global_step,
    x_column and x_label_columns. Often used in combination with
    add_multi_level_ticks.

    Parameters
    ----------
    data: pd.DataFrame
        Logged data containing the x_label_columns.
    x_column: str
        the name of the global_step (default 'global_step')
    x_label_columns: [str, ...]
        the name and hierarchical order of the columns (default ['episode', 'step'])
        NOTE: the default list is never mutated, so the shared default is safe.

    Returns
    -------
    (data, plot_index, x_column, x_label_columns)
    """
    plot_index = (
        data.groupby(x_label_columns)
        .count()
        .reset_index()[x_label_columns]
        .sort_values(x_label_columns)
    )
    plot_index[x_column] = np.arange(len(plot_index))
    # fix: set_index returns a new frame (it is not in-place), so the
    # previous bare `plot_index.set_index(x_column)` was a no-op and
    # add_multi_level_ticks' `.loc[tick]` lookups relied on the leftover
    # index by accident. drop=False keeps x_column as a column too, so
    # the merge below still carries it into `data`.
    plot_index = plot_index.set_index(x_column, drop=False)
    data = data.merge(plot_index, on=x_label_columns)
    return data, plot_index, x_column, x_label_columns
def add_multi_level_ticks(
    grid: sns.FacetGrid, plot_index: pd.DataFrame, x_column: str, x_label_columns: str
) -> None:
    """
    Replace global-step tick labels of a FacetGrid with 'episode:step' labels.

    Expects a FacetGrid with global_step (x_column) as x-axis. Each tick value
    that appears in plot_index is relabelled as 'episode:step'; ticks with no
    mapping get an empty label.

    Parameters
    ----------
    grid: sns.FacetGrid
    plot_index: pd.DataFrame
        Mapping between current tick values (global steps) and the label parts,
        usually the result from generate_global_step.
    x_column: str
        column label to use for looking up tick values
    x_label_columns: [str, ...]
        column labels joined by ':' to form the new labels

    Returns
    -------
    """
    for axis in grid.axes.flat:
        tick_values = axis.get_xticks()
        lookup = plot_index[plot_index[x_column].isin(tick_values)]
        new_labels = []
        for tick in tick_values:
            if tick in lookup.index:
                epoch, step = lookup.loc[tick][x_label_columns].tolist()
                new_labels.append(f"{epoch}:{step}")
            else:
                # no mapping for this tick -> blank label
                new_labels.append("")
        axis.set_xticklabels(new_labels, minor=False)
def plot(
    plot_function,
    settings: dict,
    title: str = None,
    x_label: str = None,
    y_label: str = None,
    **kwargs,
) -> sns.FacetGrid:
    """
    Helper creating a FacetGrid.

    1. Updates settings with kwargs (overwrites values, mutates the dict)
    2. Plots using plot_function(**settings)
    3. Sets x/y labels; if not provided, the column names are converted to
       pretty strings via space_sep_upper
    4. Sets the title (may need manual adjustment for large multi-row/col plots)

    Parameters
    ----------
    plot_function:
        function generating the FacetGrid, e.g. sns.catplot / sns.relplot
    settings: dict
        default keyword settings for plot_function
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacetGrid
    """
    settings.update(kwargs)  # 1. caller-supplied kwargs win
    grid = plot_function(**settings)  # 2.
    # 3. derive missing labels from seaborn's recorded variable names
    if x_label is None:
        x_label = space_sep_upper(grid._x_var)
    if y_label is None:
        y_label = space_sep_upper(grid._y_var)
    grid.set_xlabels(x_label)
    grid.set_ylabels(y_label)
    # 4.
    grid.tight_layout()
    if title is not None:
        grid.fig.suptitle(title, y=0.97)  # rule of thumb. Has to be improved in future
        grid.fig.subplots_adjust(top=0.9)
    return grid
def plot_performance(
    data, title=None, x_label=None, y_label=None, **kwargs
) -> sns.FacetGrid:
    """
    Line plot of the performance over episodes.

    By default the mean performance and one stddev over all instances and seeds
    are shown; map attributes explicitly (e.g. hue='seed', col='instance') to
    change that.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to examples/plotting/performance_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacetGrid
    """
    default_settings = {
        "data": data,
        "x": "episode",
        "y": "overall_performance",
        "kind": "line",
    }
    return plot(sns.relplot, default_settings, title, x_label, y_label, **kwargs)
def plot_performance_per_instance(
    data, title=None, x_label=None, y_label=None, **args
) -> sns.FacetGrid:
    """
    Bar plot of the mean performance per instance, ordered by performance.

    By default the mean over seeds is shown; map seed explicitly (e.g.
    col='seed') to change that.
    For more details see: https://seaborn.pydata.org/generated/seaborn.catplot.html
    For examples refer to examples/plotting/performance_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacetGrid
    """
    # order instances by their mean overall performance (ascending)
    instance_order = (
        data.groupby("instance").mean().sort_values("overall_performance").index
    )
    bar_settings = {
        "data": data,
        "x": "instance",
        "y": "overall_performance",
        "order": instance_order,
        "kind": "bar",
    }
    grid = plot(sns.catplot, bar_settings, title, x_label, y_label, **args)
    # todo: should probably not always be set like this (multi row/col)
    grid.set_titles("Mean Performance per Instance")
    return grid
def plot_step_time(
    data,
    show_global_step=False,
    interval=1,
    title=None,
    x_label=None,
    y_label=None,
    **args,
) -> sns.FacetGrid:
    """
    Line plot of the measured time per step.

    By default the mean and one stddev over all instances and seeds are shown;
    map attributes explicitly (e.g. hue='seed', col='instance') to change that.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to examples/plotting/time_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    show_global_step: bool
        If to show the global_step (step enumerated over all episodes) or Episode:Step. (False default)
    interval: int
        Interval in number of steps to average over. (default = 1)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacetGrid
    """
    multi_level_x_label = "Epoch:Step"
    data, plot_index, x_column, x_label_columns = generate_global_step(data)

    if interval > 1:
        # average the step duration over windows of `interval` global steps
        data["groups"] = data[x_column] // interval
        data = data.groupby("groups").agg({x_column: "min", "step_duration": "mean"})
        if y_label is None:
            y_label = f"Mean per duration per {interval} steps"

    line_settings = {
        "data": data,
        "x": x_column,
        "y": "step_duration",
        "kind": "line",
    }
    if x_label is None and not show_global_step:
        x_label = multi_level_x_label
    grid = plot(sns.relplot, line_settings, title, x_label, y_label, **args)
    if not show_global_step:
        add_multi_level_ticks(grid, plot_index, x_column, x_label_columns)
    return grid
def plot_episode_time(
    data, title=None, x_label=None, y_label=None, **kargs
) -> sns.FacetGrid:
    """
    Line plot of the measured time per episode.

    By default the mean and one stddev over all instances and seeds are shown;
    map attributes explicitly (e.g. hue='seed', col='instance') to change that.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to examples/plotting/time_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacetGrid
    """
    default_settings = {
        "data": data,
        "x": "episode",
        "y": "episode_duration",
        "kind": "line",
    }
    return plot(sns.relplot, default_settings, title, x_label, y_label, **kargs)
def plot_action(
    data,
    show_global_step=False,
    interval=1,
    title=None,
    x_label=None,
    y_label=None,
    **kargs,
):
    """
    Line plot of the logged actions over time; thin wrapper around plot_space
    with space_column_name='action'.

    Action spaces can be large and plots of all dimensions at once may get
    messy (and slow) -- select a subset of columns before calling this.
    By default the mean and one stddev over all instances and seeds are shown;
    map attributes explicitly (e.g. hue='seed', col='instance') to change that.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to examples/plotting/action_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    show_global_step: bool
        If to show the global_step (step enumerated over all episodes) or Episode:Step. (False default)
    interval: int
        Interval in number of steps to average over. (default = 1)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacetGrid
    """
    return plot_space(
        data, "action", show_global_step, interval, title, x_label, y_label, **kargs
    )
def plot_state(
    data,
    show_global_step=False,
    interval=1,
    title=None,
    x_label=None,
    y_label=None,
    **kargs,
):
    """
    Line plot of the logged state over time; thin wrapper around plot_space
    with space_column_name='state'.

    States can be large (especially dict state spaces) and plots of all
    dimensions at once may get messy (and slow) -- select a subset of columns
    before calling this.
    By default the mean and one stddev over all instances and seeds are shown;
    map attributes explicitly (e.g. hue='seed', col='instance') to change that.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to examples/plotting/state_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    show_global_step: bool
        If to show the global_step (step enumerated over all episodes) or Episode:Step. (False default)
    interval: int
        Interval in number of steps to average over. (default = 1)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacetGrid
    """
    return plot_space(
        data, "state", show_global_step, interval, title, x_label, y_label, **kargs
    )
def plot_space(
    data,
    space_column_name,
    show_global_step,
    interval=1,
    title=None,
    x_label=None,
    y_label=None,
    **args,
) -> sns.FacetGrid:
    """
    Create a line plot showing a (state/action) space over time.
    Please be aware that spaces can be quite large and the plots can become quite messy (and take some time)
    if you try plot all dimensions at once. It is therefore recommended to select a subset of columns before running the
    plot method. Especially for dict spaces.
    Per default the mean and one stddev over all instances and seeds is shown; if you want to change
    this specify a property to map those attributes to e.g hue='seed' or/and col='instance'.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to
    examples/plotting/state_plotting.py or
    examples/plotting/action_plotting.py
    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    space_column_name: str
        Common prefix of the columns holding the space values (e.g. 'action').
    show_global_step: bool
        If to show the global_step (step enumerated over all episodes) or Episode:Step. (False default)
    interval: int
        Interval in number of steps to average over. (default = 1)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.
    Returns
    -------
    sns.FacetGrid
    """
    # first find columns with prefix space_column_name
    space_entries = list(
        filter(lambda col: col.startswith(space_column_name), data.columns)
    )
    number_of_space_entries = len(space_entries)
    # keep the original prefix for labeling; space_column_name itself may be
    # reassigned below when the single column has an odd name
    y_label_name = space_column_name
    if number_of_space_entries > 1:
        # if we have more than one space dims we reshape the dataframe in order to be able to control the plots behavior
        # per dimension (wide -> long, dimension index goes into column 'i')
        data = pd.wide_to_long(
            data,
            stubnames=[space_column_name],
            sep="_",
            i=["episode", "step", "instance"]
            + (["seed"] if "seed" in data.columns else []),
            j="i",
            suffix=".*",
        ).reset_index()
    elif number_of_space_entries == 1 and space_column_name not in data.columns:
        # If there is only one dimension but the column name is odd,
        # fall back to the single matching column
        space_column_name, *_ = space_entries
    data, plot_index, x_column, x_label_columns = generate_global_step(data)
    # perform averaging over intervals
    if interval > 1:
        data["interval"] = data[x_column] // interval
        # group by every remaining column (includes 'interval' and e.g.
        # 'instance'/'seed'/'i') so each series is averaged separately
        group_columns = list(
            data.columns.drop(x_label_columns + [x_column, space_column_name])
        )
        data = data.groupby(group_columns).agg(
            {x_column: "min", space_column_name: "mean"}
        )
        y_label = (
            f"Mean {y_label_name} per {interval} steps" if y_label is None else y_label
        )
        data = data.reset_index()
    settings = {
        "data": data,
        "x": x_column,
        "y": space_column_name,
        "kind": "line",
    }
    # we want the different dims in different plots / columns
    # todo: refactor
    if number_of_space_entries > 1:
        settings["col"] = "i"
        if number_of_space_entries > 3:
            settings["col_wrap"] = 3
    if "instance" in data.columns:
        settings["hue"] = "instance"
    if x_label is None:
        x_label = None if show_global_step else "Epoch:Step"
    if y_label is None:
        y_label = y_label_name
    grid = plot(sns.relplot, settings, title, x_label, y_label, **args)
    if not show_global_step:
        add_multi_level_ticks(grid, plot_index, x_column, x_label_columns)
    return grid
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import glob
import multiprocessing as mp
import os
import time
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
    """Build a frozen detectron2 config from file + command-line arguments."""
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    # Set score_threshold for builtin models
    threshold = args.confidence_threshold
    config.MODEL.RETINANET.SCORE_THRESH_TEST = threshold
    config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    config.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = threshold
    config.freeze()
    return config
def get_parser():
    """Build the command-line parser for the demo script."""
    arg_parser = argparse.ArgumentParser(description="Detectron2 Demo")
    arg_parser.add_argument(
        "--config-file",
        default="configs/quick_schedules/e2e_mask_rcnn_R_50_FPN_inference_acc_test.yaml",
        metavar="FILE",
        help="path to config file",
    )
    arg_parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
    arg_parser.add_argument("--video-input", help="Path to video file.")
    arg_parser.add_argument("--input", nargs="+", help="A list of space separated input images")
    arg_parser.add_argument(
        "--output",
        help="A file or directory to save output visualizations. "
        "If not given, will show output in an OpenCV window.",
    )
    arg_parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    # everything after --opts is forwarded verbatim to the config overrides
    arg_parser.add_argument(
        "--opts",
        help="Modify model config options using the command-line",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return arg_parser
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
# use PIL, to be consistent with evaluation
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
logger.info(
"{}: detected {} instances in {:.2f}s".format(
path, len(predictions["instances"]), time.time() - start_time
)
)
if args.output:
if os.path.isdir(args.output):
assert os.path.isdir(args.output), args.output
out_filename = os.path.join(args.output, os.path.basename(path))
else:
assert len(args.input) == 1, "Please specify a directory with args.output"
out_filename = args.output
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
elif args.webcam:
assert args.input is None, "Cannot have both --input and --webcam!"
cam = cv2.VideoCapture(0)
for vis in tqdm.tqdm(demo.run_on_video(cam)):
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis)
if cv2.waitKey(1) == 27:
break # esc to quit
cv2.destroyAllWindows()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + ".mkv"
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
# some installation of opencv may not support x264 (due to its license),
# you can try other format (e.g. MPEG)
fourcc=cv2.VideoWriter_fourcc(*"x264"),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break # esc to quit
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows()
|
import datetime
from ..controllers.logger import Logger
from ..core.fake_ssl_thread import FakeSslThreadABC
from ..utils.functions import bypass_error, fake_certificate_exists
from ..utils.socket import close_socket_pass_exc, get_bind_socket
from traceback import format_exc
from threading import Thread
from ssl import SSLSocket
class FakeSslProxy(Thread):
    """Local TLS server thread impersonating a remote host.

    Binds a TLS socket on 127.0.0.1 using a generated fake certificate for
    the remote host and spawns a FakeSslThreadABC per accepted client.
    NOTE: the thread starts itself from __init__; callers should poll
    `ready` / `get_address()` after construction.
    """

    def __init__(self, remote_address, cli_address):
        Thread.__init__(self)
        # remote_address is a (host, port) tuple -- used directly in the
        # %-format below
        self.name = self.__server_name = "Fake SSL Server Proxy (%s:%d)" % remote_address
        self.__address = "127.0.0.1"
        self.__port = None  # chosen later by __get_sock_on_free_port
        self.__logger = Logger()
        self.__server_socket = None
        self.__remote_address = remote_address
        self.__cli_address = cli_address
        # local port range scanned for a free port
        self.__from_port = 4000
        self.__to_port = 9000
        self.ready = False
        self.start()
    #####################################
    # PRIVATE METHODS
    #####################################
    # function to wait a generation of fake certificate
    def __wait_certificate(self, timeout=2):
        """Busy-wait until the fake certificate for the remote host exists,
        raising after `timeout` seconds."""
        start_time = datetime.datetime.now()
        max_time = datetime.timedelta(seconds=timeout)
        while 1:
            if fake_certificate_exists(self.__remote_address[0]):
                return
            exec_time = datetime.datetime.now() - start_time
            # exec_time.time
            if exec_time > max_time:
                raise Exception("Wait certificate for %s timeout" % self.__remote_address)
    # function to get a socket on a free port
    def __get_sock_on_free_port(self) -> SSLSocket:
        """Scan the port range and return the first TLS socket that binds.

        Side effect: leaves self.__port set to the bound (or last tried)
        port. Any bind/SSL error is swallowed and the next port is tried.
        """
        for port in range(self.__from_port, self.__to_port):
            self.__port = int(port)
            try:
                sock = get_bind_socket(
                    (self.__address, port),
                    True,
                    "conf/key/fake-gen/%s.crt" % self.__remote_address[0],
                    "conf/key/fake-gen/%s.key" % self.__remote_address[0]
                )
                return sock
            except Exception as e:
                # port in use (or other bind error) -- try the next one
                continue
        raise Exception("Free port for %s server not found" % self.__server_name)
    #####################################
    # PUBLIC METHODS
    #####################################
    def shutdown(self):
        """Stop the accept loop and close the listening socket."""
        self.ready = False
        close_socket_pass_exc(self.__server_socket)
    # method to get the tuple that represent
    # the server address (host and port)
    def get_address(self) -> tuple:
        return self.__address, self.__port
    # method to start loop server
    def run(self) -> None:
        """Thread body: wait for the certificate, bind, then accept clients
        until `ready` is cleared (see shutdown())."""
        cli_socket = None
        try:
            self.__wait_certificate()
            # create socket and start listen to it
            self.__server_socket = self.__get_sock_on_free_port()
        except Exception as e:
            close_socket_pass_exc(self.__server_socket)
            out = '' if bypass_error(e) else format_exc()
            out += '[!!] %s starting failed\n' % self.__server_name
            out += '[!!] Caught an exception %s\n' % str(e)
            self.__logger.print_err(out)
            return
        # start listen and loop server
        self.__logger.print_conn('[*] Start %s listen on %s:%d\n' % (self.__server_name, self.__address, self.__port))
        self.__server_socket.listen()
        self.ready = True
        while self.ready:
            try:
                cli_socket, cli_address = self.__server_socket.accept()
                cli_address = cli_address[:2]  # keep only (host, port)
                # print connection info
                self.__logger.print_conn('[=>] Local client connect to %s' % self.__server_name)
                # start thread to communicate with client and remote host
                proxy_thread = FakeSslThreadABC(
                    cli_socket,
                    cli_address,
                    self.__server_socket,
                    self.__remote_address,
                    self.__server_name
                )
                proxy_thread.start()
            except KeyboardInterrupt:
                # NOTE(review): exit() inside a worker thread only ends this
                # thread, not the process -- confirm intended behavior
                self.__logger.print_err("[!!] Keyboard interrupt. Exit...")
                self.__server_socket.close()
                exit()
            except Exception as e:
                out = '' if bypass_error(e) else format_exc()
                out += '[!!] Caught an exception on %s: %s\n' % (self.__server_name, str(e))
                self.__logger.print_err(out)
                # break
        # close all sockets
        self.__logger.print_conn('[*] Close %s\n' % self.__server_name)
        close_socket_pass_exc(cli_socket)
        close_socket_pass_exc(self.__server_socket)
|
import unittest
import os
from monty.collections import frozendict, Namespace, AttrDict, \
FrozenAttrDict, tree
test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
class FrozenDictTest(unittest.TestCase):
    """Unit tests for the read-only dict variants in monty.collections."""

    def test_frozen_dict(self):
        frozen = frozendict({"hello": "world"})
        # both item assignment and update must be rejected
        with self.assertRaises(KeyError):
            frozen["k"] = "v"
        with self.assertRaises(KeyError):
            frozen.update({"k": "v"})
        self.assertEqual(frozen["hello"], "world")

    def test_namespace_dict(self):
        ns = Namespace(foo="bar")
        # new keys may be added, existing keys are write-once
        ns["hello"] = "world"
        self.assertEqual(ns["foo"], "bar")
        with self.assertRaises(KeyError):
            ns["foo"] = "spam"

    def test_attr_dict(self):
        ad = AttrDict(foo=1, bar=2)
        # attribute and item access are interchangeable
        self.assertEqual(ad.bar, 2)
        self.assertEqual(ad["foo"], ad.foo)
        ad.bar = "hello"
        self.assertEqual(ad["bar"], "hello")

    def test_frozen_attrdict(self):
        fad = FrozenAttrDict({"hello": "world", 1: 2})
        self.assertEqual(fad["hello"], "world")
        self.assertEqual(fad.hello, "world")
        # every mutation path must raise
        self.assertRaises(KeyError, fad.update, {"updating": 2})
        with self.assertRaises(KeyError):
            fad["foo"] = "bar"
        with self.assertRaises(KeyError):
            fad.foo = "bar"
        with self.assertRaises(KeyError):
            fad.hello = "new"
class TreeTest(unittest.TestCase):
    """Unit tests for monty.collections.tree (autovivifying nested dict)."""

    def test_tree(self):
        root = tree()
        # assigning a deep path creates every intermediate level
        root['a']['b']['c']['d'] = 1
        self.assertIn('b', root['a'])
        # 'c' lives one level deeper, not directly under 'a'
        self.assertNotIn('c', root['a'])
        self.assertIn('c', root['a']['b'])
        self.assertEqual(root['a']['b']['c']['d'], 1)
if __name__ == "__main__":
unittest.main()
|
"""test sparse matrix construction functions"""
import numpy as np
from numpy import array
from numpy.testing import (assert_equal, assert_,
assert_array_equal, assert_array_almost_equal_nulp)
import pytest
from pytest import raises as assert_raises
from scipy._lib._testutils import check_free_memory
from scipy._lib._util import check_random_state
from scipy.sparse import csr_matrix, coo_matrix, construct
from scipy.sparse.construct import rand as sprand
from scipy.sparse.sputils import matrix
sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok']
#TODO check whether format=XXX is respected
def _sprandn(m, n, density=0.01, format="coo", dtype=None, random_state=None):
    # Test helper: like construct.random, but draws the nonzero values
    # from a standard normal distribution instead of uniform [0, 1).
    rng = check_random_state(random_state)
    return construct.random(m, n, density, format, dtype,
                            rng, rng.standard_normal)
class TestConstructUtils(object):
    def test_spdiags(self):
        """Check construct.spdiags against hand-written dense results for
        1-, 2- and 3-diagonal inputs, including offsets outside the matrix."""
        diags1 = array([[1, 2, 3, 4, 5]])
        diags2 = array([[1, 2, 3, 4, 5],
                        [6, 7, 8, 9,10]])
        diags3 = array([[1, 2, 3, 4, 5],
                        [6, 7, 8, 9,10],
                        [11,12,13,14,15]])
        # each case: (diagonal data, offsets, m, n, expected dense matrix)
        cases = []
        cases.append((diags1, 0, 1, 1, [[1]]))
        cases.append((diags1, [0], 1, 1, [[1]]))
        cases.append((diags1, [0], 2, 1, [[1],[0]]))
        cases.append((diags1, [0], 1, 2, [[1,0]]))
        cases.append((diags1, [1], 1, 2, [[0,2]]))
        cases.append((diags1,[-1], 1, 2, [[0,0]]))
        cases.append((diags1, [0], 2, 2, [[1,0],[0,2]]))
        cases.append((diags1,[-1], 2, 2, [[0,0],[1,0]]))
        # offset 3 lies entirely outside a 2x2 matrix -> all zeros
        cases.append((diags1, [3], 2, 2, [[0,0],[0,0]]))
        cases.append((diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
        cases.append((diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]]))
        cases.append((diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]]))
        cases.append((diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]]))
        cases.append((diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
        cases.append((diags2, [2,-3], 6, 6, [[0,0,3,0,0,0],
                                             [0,0,0,4,0,0],
                                             [0,0,0,0,5,0],
                                             [6,0,0,0,0,0],
                                             [0,7,0,0,0,0],
                                             [0,0,8,0,0,0]]))
        cases.append((diags3, [-1,0,1], 6, 6, [[6,12, 0, 0, 0, 0],
                                               [1, 7,13, 0, 0, 0],
                                               [0, 2, 8,14, 0, 0],
                                               [0, 0, 3, 9,15, 0],
                                               [0, 0, 0, 4,10, 0],
                                               [0, 0, 0, 0, 5, 0]]))
        cases.append((diags3, [-4,2,-1], 6, 5, [[0, 0, 8, 0, 0],
                                                [11, 0, 0, 9, 0],
                                                [0,12, 0, 0,10],
                                                [0, 0,13, 0, 0],
                                                [1, 0, 0,14, 0],
                                                [0, 2, 0, 0,15]]))
        for d,o,m,n,result in cases:
            assert_equal(construct.spdiags(d,o,m,n).todense(), result)
def test_diags(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append((a[:1], 0, (1, 1), [[1]]))
cases.append(([a[:1]], [0], (1, 1), [[1]]))
cases.append(([a[:1]], [0], (2, 1), [[1],[0]]))
cases.append(([a[:1]], [0], (1, 2), [[1,0]]))
cases.append(([a[:1]], [1], (1, 2), [[0,1]]))
cases.append(([a[:2]], [0], (2, 2), [[1,0],[0,2]]))
cases.append(([a[:1]],[-1], (2, 2), [[0,0],[1,0]]))
cases.append(([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
cases.append(([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]]))
cases.append(([a[:1]], [-2], (3, 5), [[0,0,0,0,0],[0,0,0,0,0],[1,0,0,0,0]]))
cases.append(([a[:2]], [-1], (3, 5), [[0,0,0,0,0],[1,0,0,0,0],[0,2,0,0,0]]))
cases.append(([a[:3]], [0], (3, 5), [[1,0,0,0,0],[0,2,0,0,0],[0,0,3,0,0]]))
cases.append(([a[:3]], [1], (3, 5), [[0,1,0,0,0],[0,0,2,0,0],[0,0,0,3,0]]))
cases.append(([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]]))
cases.append(([a[:2]], [3], (3, 5), [[0,0,0,1,0],[0,0,0,0,2],[0,0,0,0,0]]))
cases.append(([a[:1]], [4], (3, 5), [[0,0,0,0,1],[0,0,0,0,0],[0,0,0,0,0]]))
cases.append(([a[:1]], [-4], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[1,0,0]]))
cases.append(([a[:2]], [-3], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[1,0,0],[0,2,0]]))
cases.append(([a[:3]], [-2], (5, 3), [[0,0,0],[0,0,0],[1,0,0],[0,2,0],[0,0,3]]))
cases.append(([a[:3]], [-1], (5, 3), [[0,0,0],[1,0,0],[0,2,0],[0,0,3],[0,0,0]]))
cases.append(([a[:3]], [0], (5, 3), [[1,0,0],[0,2,0],[0,0,3],[0,0,0],[0,0,0]]))
cases.append(([a[:2]], [1], (5, 3), [[0,1,0],[0,0,2],[0,0,0],[0,0,0],[0,0,0]]))
cases.append(([a[:1]], [2], (5, 3), [[0,0,1],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]))
cases.append(([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]]))
cases.append(([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
cases.append(([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0],
[0,0,0,2,0,0],
[0,0,0,0,3,0],
[6,0,0,0,0,4],
[0,7,0,0,0,0],
[0,0,8,0,0,0]]))
cases.append(([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[6,11, 0, 0, 0],
[1, 7,12, 0, 0],
[0, 2, 8,13, 0],
[0, 0, 3, 9,14],
[0, 0, 0, 4,10]]))
cases.append(([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[0, 0, 6, 0, 0],
[11, 0, 0, 7, 0],
[0,12, 0, 0, 8],
[0, 0,13, 0, 0],
[1, 0, 0,14, 0],
[0, 2, 0, 0,15]]))
# too long arrays are OK
cases.append(([a], [0], (1, 1), [[1]]))
cases.append(([a[:3],b], [0,2], (3, 3), [[1, 0, 6], [0, 2, 0], [0, 0, 3]]))
cases.append((np.array([[1, 2, 3], [4, 5, 6]]), [0,-1], (3, 3), [[1, 0, 0], [4, 2, 0], [0, 5, 3]]))
# scalar case: broadcasting
cases.append(([1,-2,1], [1,0,-1], (3, 3), [[-2, 1, 0],
[1, -2, 1],
[0, 1, -2]]))
for d, o, shape, result in cases:
err_msg = "%r %r %r %r" % (d, o, shape, result)
assert_equal(construct.diags(d, o, shape=shape).todense(),
result, err_msg=err_msg)
if shape[0] == shape[1] and hasattr(d[0], '__len__') and len(d[0]) <= max(shape):
# should be able to find the shape automatically
assert_equal(construct.diags(d, o).todense(), result,
err_msg=err_msg)
def test_diags_default(self):
a = array([1, 2, 3, 4, 5])
assert_equal(construct.diags(a).todense(), np.diag(a))
def test_diags_default_bad(self):
a = array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])
assert_raises(ValueError, construct.diags, a)
def test_diags_bad(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append(([a[:0]], 0, (1, 1)))
cases.append(([a[:4],b,c[:3]], [-1,0,1], (5, 5)))
cases.append(([a[:2],c,b[:3]], [-4,2,-1], (6, 5)))
cases.append(([a[:2],c,b[:3]], [-4,2,-1], None))
cases.append(([], [-4,2,-1], None))
cases.append(([1], [-5], (4, 4)))
cases.append(([a], 0, None))
for d, o, shape in cases:
assert_raises(ValueError, construct.diags, d, o, shape)
assert_raises(TypeError, construct.diags, [[None]], [0])
def test_diags_vs_diag(self):
# Check that
#
# diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ...
#
np.random.seed(1234)
for n_diags in [1, 2, 3, 4, 5, 10]:
n = 1 + n_diags//2 + np.random.randint(0, 10)
offsets = np.arange(-n+1, n-1)
np.random.shuffle(offsets)
offsets = offsets[:n_diags]
diagonals = [np.random.rand(n - abs(q)) for q in offsets]
mat = construct.diags(diagonals, offsets)
dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)])
assert_array_almost_equal_nulp(mat.todense(), dense_mat)
if len(offsets) == 1:
mat = construct.diags(diagonals[0], offsets[0])
dense_mat = np.diag(diagonals[0], offsets[0])
assert_array_almost_equal_nulp(mat.todense(), dense_mat)
def test_diags_dtype(self):
x = construct.diags([2.2], [0], shape=(2, 2), dtype=int)
assert_equal(x.dtype, int)
assert_equal(x.todense(), [[2, 0], [0, 2]])
def test_diags_one_diagonal(self):
d = list(range(5))
for k in range(-5, 6):
assert_equal(construct.diags(d, k).toarray(),
construct.diags([d], [k]).toarray())
def test_diags_empty(self):
x = construct.diags([])
assert_equal(x.shape, (0, 0))
def test_identity(self):
assert_equal(construct.identity(1).toarray(), [[1]])
assert_equal(construct.identity(2).toarray(), [[1,0],[0,1]])
I = construct.identity(3, dtype='int8', format='dia')
assert_equal(I.dtype, np.dtype('int8'))
assert_equal(I.format, 'dia')
for fmt in sparse_formats:
I = construct.identity(3, format=fmt)
assert_equal(I.format, fmt)
assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
def test_eye(self):
assert_equal(construct.eye(1,1).toarray(), [[1]])
assert_equal(construct.eye(2,3).toarray(), [[1,0,0],[0,1,0]])
assert_equal(construct.eye(3,2).toarray(), [[1,0],[0,1],[0,0]])
assert_equal(construct.eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]])
assert_equal(construct.eye(3,3,dtype='int16').dtype, np.dtype('int16'))
for m in [3, 5]:
for n in [3, 5]:
for k in range(-5,6):
assert_equal(construct.eye(m, n, k=k).toarray(), np.eye(m, n, k=k))
if m == n:
assert_equal(construct.eye(m, k=k).toarray(), np.eye(m, n, k=k))
def test_eye_one(self):
assert_equal(construct.eye(1).toarray(), [[1]])
assert_equal(construct.eye(2).toarray(), [[1,0],[0,1]])
I = construct.eye(3, dtype='int8', format='dia')
assert_equal(I.dtype, np.dtype('int8'))
assert_equal(I.format, 'dia')
for fmt in sparse_formats:
I = construct.eye(3, format=fmt)
assert_equal(I.format, fmt)
assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
def test_kron(self):
cases = []
cases.append(array([[0]]))
cases.append(array([[-1]]))
cases.append(array([[4]]))
cases.append(array([[10]]))
cases.append(array([[0],[0]]))
cases.append(array([[0,0]]))
cases.append(array([[1,2],[3,4]]))
cases.append(array([[0,2],[5,0]]))
cases.append(array([[0,2,-6],[8,0,14]]))
cases.append(array([[5,4],[0,0],[6,0]]))
cases.append(array([[5,4,4],[1,0,0],[6,0,8]]))
cases.append(array([[0,1,0,2,0,5,8]]))
cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]]))
for a in cases:
for b in cases:
result = construct.kron(csr_matrix(a),csr_matrix(b)).todense()
expected = np.kron(a,b)
assert_array_equal(result,expected)
def test_kron_large(self):
n = 2**16
a = construct.eye(1, n, n-1)
b = construct.eye(n, 1, 1-n)
construct.kron(a, a)
construct.kron(b, b)
def test_kronsum(self):
cases = []
cases.append(array([[0]]))
cases.append(array([[-1]]))
cases.append(array([[4]]))
cases.append(array([[10]]))
cases.append(array([[1,2],[3,4]]))
cases.append(array([[0,2],[5,0]]))
cases.append(array([[0,2,-6],[8,0,14],[0,3,0]]))
cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]]))
for a in cases:
for b in cases:
result = construct.kronsum(csr_matrix(a),csr_matrix(b)).todense()
expected = np.kron(np.eye(len(b)), a) + \
np.kron(b, np.eye(len(a)))
assert_array_equal(result,expected)
def test_vstack(self):
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5,6]])
expected = matrix([[1, 2],
[3, 4],
[5, 6]])
assert_equal(construct.vstack([A,B]).todense(), expected)
assert_equal(construct.vstack([A,B], dtype=np.float32).dtype, np.float32)
assert_equal(construct.vstack([A.tocsr(),B.tocsr()]).todense(),
expected)
assert_equal(construct.vstack([A.tocsr(),B.tocsr()], dtype=np.float32).dtype,
np.float32)
assert_equal(construct.vstack([A.tocsr(),B.tocsr()],
dtype=np.float32).indices.dtype, np.int32)
assert_equal(construct.vstack([A.tocsr(),B.tocsr()],
dtype=np.float32).indptr.dtype, np.int32)
def test_hstack(self):
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5],[6]])
expected = matrix([[1, 2, 5],
[3, 4, 6]])
assert_equal(construct.hstack([A,B]).todense(), expected)
assert_equal(construct.hstack([A,B], dtype=np.float32).dtype, np.float32)
assert_equal(construct.hstack([A.tocsc(),B.tocsc()]).todense(),
expected)
assert_equal(construct.hstack([A.tocsc(),B.tocsc()], dtype=np.float32).dtype,
np.float32)
def test_bmat(self):
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5],[6]])
C = coo_matrix([[7]])
D = coo_matrix((0,0))
expected = matrix([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
assert_equal(construct.bmat([[A,B],[None,C]]).todense(), expected)
expected = matrix([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
assert_equal(construct.bmat([[A,None],[None,C]]).todense(), expected)
expected = matrix([[0, 5],
[0, 6],
[7, 0]])
assert_equal(construct.bmat([[None,B],[C,None]]).todense(), expected)
expected = matrix(np.empty((0,0)))
assert_equal(construct.bmat([[None,None]]).todense(), expected)
assert_equal(construct.bmat([[None,D],[D,None]]).todense(), expected)
# test bug reported in gh-5976
expected = matrix([[7]])
assert_equal(construct.bmat([[None,D],[C,None]]).todense(), expected)
# test failure cases
with assert_raises(ValueError) as excinfo:
construct.bmat([[A], [B]])
excinfo.match(r'Got blocks\[1,0\]\.shape\[1\] == 1, expected 2')
with assert_raises(ValueError) as excinfo:
construct.bmat([[A, C]])
excinfo.match(r'Got blocks\[0,1\]\.shape\[0\] == 1, expected 2')
@pytest.mark.slow
def test_concatenate_int32_overflow(self):
""" test for indptr overflow when concatenating matrices """
check_free_memory(30000)
n = 33000
A = csr_matrix(np.ones((n, n), dtype=bool))
B = A.copy()
C = construct._compressed_sparse_stack((A,B), 0)
assert_(np.all(np.equal(np.diff(C.indptr), n)))
assert_equal(C.indices.dtype, np.int64)
assert_equal(C.indptr.dtype, np.int64)
def test_block_diag_basic(self):
""" basic test for block_diag """
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5],[6]])
C = coo_matrix([[7]])
expected = matrix([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 6, 0],
[0, 0, 0, 7]])
assert_equal(construct.block_diag((A, B, C)).todense(), expected)
def test_block_diag_scalar_1d_args(self):
""" block_diag with scalar and 1d arguments """
# one 1d matrix and a scalar
assert_array_equal(construct.block_diag([[2,3], 4]).toarray(),
[[2, 3, 0], [0, 0, 4]])
def test_block_diag_1(self):
""" block_diag with one matrix """
assert_equal(construct.block_diag([[1, 0]]).todense(),
matrix([[1, 0]]))
assert_equal(construct.block_diag([[[1, 0]]]).todense(),
matrix([[1, 0]]))
assert_equal(construct.block_diag([[[1], [0]]]).todense(),
matrix([[1], [0]]))
# just on scalar
assert_equal(construct.block_diag([1]).todense(),
matrix([[1]]))
def test_random_sampling(self):
# Simple sanity checks for sparse random sampling.
for f in sprand, _sprandn:
for t in [np.float32, np.float64, np.longdouble,
np.int32, np.int64, np.complex64, np.complex128]:
x = f(5, 10, density=0.1, dtype=t)
assert_equal(x.dtype, t)
assert_equal(x.shape, (5, 10))
assert_equal(x.nnz, 5)
x1 = f(5, 10, density=0.1, random_state=4321)
assert_equal(x1.dtype, np.double)
x2 = f(5, 10, density=0.1,
random_state=np.random.RandomState(4321))
assert_array_equal(x1.data, x2.data)
assert_array_equal(x1.row, x2.row)
assert_array_equal(x1.col, x2.col)
for density in [0.0, 0.1, 0.5, 1.0]:
x = f(5, 10, density=density)
assert_equal(x.nnz, int(density * np.prod(x.shape)))
for fmt in ['coo', 'csc', 'csr', 'lil']:
x = f(5, 10, format=fmt)
assert_equal(x.format, fmt)
assert_raises(ValueError, lambda: f(5, 10, 1.1))
assert_raises(ValueError, lambda: f(5, 10, -0.1))
def test_rand(self):
# Simple distributional checks for sparse.rand.
random_states = [None, 4321, np.random.RandomState()]
try:
gen = np.random.default_rng()
random_states.append(gen)
except AttributeError:
pass
for random_state in random_states:
x = sprand(10, 20, density=0.5, dtype=np.float64,
random_state=random_state)
assert_(np.all(np.less_equal(0, x.data)))
assert_(np.all(np.less_equal(x.data, 1)))
def test_randn(self):
# Simple distributional checks for sparse.randn.
# Statistically, some of these should be negative
# and some should be greater than 1.
random_states = [None, 4321, np.random.RandomState()]
try:
gen = np.random.default_rng()
random_states.append(gen)
except AttributeError:
pass
for random_state in random_states:
x = _sprandn(10, 20, density=0.5, dtype=np.float64,
random_state=random_state)
assert_(np.any(np.less(x.data, 0)))
assert_(np.any(np.less(1, x.data)))
def test_random_accept_str_dtype(self):
# anything that np.dtype can convert to a dtype should be accepted
# for the dtype
construct.random(10, 10, dtype='d')
|
"""
`schemas.errors` module defines pydantic models for different error responses.
"""
from pydantic import BaseModel
class NotFoundTask(BaseModel):
    """Error payload returned when no task matches the requested ID."""

    error: str = "Task not found by ID"
class NotFoundTopic(BaseModel):
    """Error payload returned when no topic matches the requested ID."""

    error: str = "Topic not found by ID"
class RateLimitExceeded(BaseModel):
    """Error payload returned when the client exceeds the request quota."""

    error: str = "Rate limit exceeded: 2 per 1 minute"
class DockerUnavailable(BaseModel):
    """Error payload returned when the Docker backend cannot be reached."""

    error: str = "Docker problems, please try again later."
class EmptyRequest(BaseModel):
    """Error payload returned when the request carried no body."""

    error: str = "The request was empty"
class InactiveUser(BaseModel):
    """Error payload returned when the authenticated user is deactivated."""

    error: str = "Inactive user"
class EmailAlreadyTaken(BaseModel):
    """Error payload returned when the email is already registered."""

    error: str = "Email already registered"
class NoUserEmail(BaseModel):
    """Error payload returned when no account exists for the given email."""

    # Grammar fix in the user-facing message: "who have" -> "who has".
    error: str = "There is no user who has already registered with this email address."
|
""" Path utilities for benchbuild. """
import os
import sys
def list_to_path(pathlist):
    """Join a list of path elements into a single PATH-style string."""
    sep = os.path.pathsep
    return sep.join(pathlist)
def path_to_list(pathstr):
    """Convert a PATH-style string to a list of its non-empty elements."""
    parts = pathstr.split(os.path.pathsep)
    return [part for part in parts if part]
def determine_path():
    """Return the absolute directory containing this module.

    Borrowed from wxglade.py.  Symlinks are resolved so the real on-disk
    location is returned.  If ``__file__`` is unavailable (e.g. in an
    embedded interpreter), an apology is printed and the interpreter exits,
    mirroring the original behavior.
    """
    try:
        root = __file__
    except NameError:
        # Fix: the original bare "except:" also swallowed KeyboardInterrupt
        # and SystemExit; only a missing __file__ is expected here.
        print("I'm sorry, but something is wrong.")
        print("There is no __file__ variable. Please contact the author.")
        sys.exit()
    if os.path.islink(root):
        root = os.path.realpath(root)
    return os.path.dirname(os.path.abspath(root))
def template_files(path, exts=None):
    """Return the filenames found at ``path``, filtered by extension.

    Arguments:
        path: Filepath to list.  Relative paths are resolved against this
            module's directory (``determine_path()``); absolute paths are
            used as-is.
        exts: Optional list of extensions (e.g. ``['.inc']``) to keep.
            With no filter (the default), no file matches — preserved from
            the original behavior.

    Returns:
        A list of matching filenames joined onto the original ``path``,
        or an empty list when the directory does not exist.
    """
    # Fix: mutable default argument [] replaced with None sentinel.
    if exts is None:
        exts = []
    # Fix: an absolute ``path`` previously left ``_path`` unassigned and
    # raised UnboundLocalError on the following check.
    if os.path.isabs(path):
        _path = path
    else:
        _path = os.path.join(determine_path(), path)
    if not (os.path.exists(_path) and os.path.isdir(_path)):
        return []

    files = os.listdir(_path)
    files = [f for f in files if os.path.splitext(f)[-1] in exts]
    return [os.path.join(path, f) for f in files]
def template_str(template):
    """Read the named template from the resources and return it as a str."""
    tmpl_file = os.path.join(determine_path(), template)
    with open(tmpl_file, mode='r') as tmpl_strm:
        return tmpl_strm.read()
def mkfile_uchroot(filepath, root="."):
    """
    Create a file inside a uchroot env.

    Use this when a file must be created with appropriate rights inside a
    uchroot container with subuid/subgid handling enabled.

    Args:
        filepath:
            The filepath that should be created. Absolute inside the
            uchroot container.
        root:
            The root PATH of the container filesystem as seen outside of
            the container.
    """
    from benchbuild.utils.run import uchroot_no_args

    cmd = uchroot_no_args()
    cmd = cmd["-E", "-A", "-C", "-w", "/", "-r", os.path.abspath(root)]
    cmd("--", "/bin/touch", filepath)
def mkdir_uchroot(dirpath, root="."):
    """
    Create a directory inside a uchroot env.

    Fix: the original docstring said "Create a file", but this helper runs
    ``mkdir -p`` — it creates a directory (including missing parents).
    Use this when a directory must be created with appropriate rights inside
    a uchroot container with subuid/subgid handling enabled.

    Args:
        dirpath:
            The dirpath that should be created. Absolute inside the
            uchroot container.
        root:
            The root PATH of the container filesystem as seen outside of
            the container.
    """
    from benchbuild.utils.run import uchroot_no_args

    uchroot = uchroot_no_args()
    uchroot = uchroot["-E", "-A", "-C", "-w", "/", "-r"]
    uchroot = uchroot[os.path.abspath(root)]
    uchroot("--", "/bin/mkdir", "-p", dirpath)
|
from flask import render_template,request,redirect,url_for,abort,flash
from . import main
from ..models import User, Blog, Comment
from flask_login import login_required,current_user
from datetime import datetime, timezone
from .. import db
from .forms import BlogForm,CommentForm
# Views index
@main.route('/')
def index():
    '''
    Render the root page with the page title and all blog posts.
    '''
    return render_template('index.html', title='Home',
                           posts=Blog.get_posts())
# view function to render a selected article and its comments
@main.route('/post/<int:id>', methods=['GET','POST'])
def single_line(id):
    '''
    View function to return a single article and its comments.

    Fix: aborts with 404 when no article exists for ``id``; previously the
    template was rendered with ``article=None`` (matching the handling in
    delete_blog).
    '''
    article = Blog.query.get(id)
    if article is None:
        abort(404)
    title = "article"
    comments = Comment.get_comments(id)
    return render_template('single-blog.html', article=article, title=title,
                           comments=comments)
# view route to post a blog
@main.route('/new/blog', methods=['GET','POST'])
def new_blog():
    '''
    Serve the blog-creation form and persist valid submissions.
    '''
    form = BlogForm()
    if not form.validate_on_submit():
        # GET request or validation failure: show the form again.
        return render_template('new_blog.html', form=form)
    post = Blog(title=form.title.data, blog=form.blog.data)
    post.save_blog()
    return redirect(url_for('main.index'))
#delete articles
@main.route('/delete/blog/<int:id>', methods=['GET','POST'])
def delete_blog(id):
    """
    Delete the post with the given id, aborting with 404 when missing.
    """
    post = Blog.query.filter_by(id=id).first()
    if post is None:
        abort(404)
    post.delete_blog()
    return redirect(url_for('main.index'))
#commenting route
@main.route('/post/comment/new/<int:id>', methods=['GET','POST'])
@login_required
def new_comment(id):
    '''
    Show the comment form for the given post and store valid submissions.
    Requires an authenticated user; aborts with 404 for an unknown post.
    '''
    post = Blog.query.filter_by(id=id).first()
    if post is None:
        abort(404)

    form = CommentForm()
    if form.validate_on_submit():
        comment = Comment(opinion=form.opinion.data, articles_id=id,
                          user_id=current_user.id)
        comment.save_comment()
        return redirect(url_for('main.index'))

    return render_template('new_comment.html', title='New Comment',
                           comment_form=form)
#delete selected comment
@main.route('/delete/comment/<int:id>', methods=['GET','POST'])
def delete_selected_comment(id):
    """
    Delete the selected comment.

    Fix: ``Comment.query.get(id)`` returns None for an unknown id, which
    previously raised AttributeError (HTTP 500) on the next line; now the
    view aborts with 404, matching delete_blog.
    """
    comment = Comment.query.get(id)
    if comment is None:
        abort(404)
    comment.delete_comment(id)
    return redirect(url_for('main.index'))
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class AccessReasonEnum(object):
    # Namespacing wrapper matching the protocol buffer enum layout
    # (see module docstring); the inner IntEnum carries the values.
    class AccessReason(enum.IntEnum):
        """
        Enum describing possible access reasons.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          OWNED (int): The resource is owned by the user.
          SHARED (int): The resource is shared to the user.
          LICENSED (int): The resource is licensed to the user.
          SUBSCRIBED (int): The user subscribed to the resource.
          AFFILIATED (int): The resource is accessible to the user.
        """

        UNSPECIFIED = 0
        UNKNOWN = 1
        OWNED = 2
        SHARED = 3
        LICENSED = 4
        SUBSCRIBED = 5
        AFFILIATED = 6
class AccountBudgetProposalErrorEnum(object):
    # Namespacing wrapper matching the protocol buffer enum layout
    # (see module docstring); the inner IntEnum carries the values.
    class AccountBudgetProposalError(enum.IntEnum):
        """
        Enum describing possible account budget proposal errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          FIELD_MASK_NOT_ALLOWED (int): The field mask must be empty for create/end/remove proposals.
          IMMUTABLE_FIELD (int): The field cannot be set because of the proposal type.
          REQUIRED_FIELD_MISSING (int): The field is required because of the proposal type.
          CANNOT_CANCEL_APPROVED_PROPOSAL (int): Proposals that have been approved cannot be cancelled.
          CANNOT_REMOVE_UNAPPROVED_BUDGET (int): Budgets that haven't been approved cannot be removed.
          CANNOT_REMOVE_RUNNING_BUDGET (int): Budgets that are currently running cannot be removed.
          CANNOT_END_UNAPPROVED_BUDGET (int): Budgets that haven't been approved cannot be truncated.
          CANNOT_END_INACTIVE_BUDGET (int): Only budgets that are currently running can be truncated.
          BUDGET_NAME_REQUIRED (int): All budgets must have names.
          CANNOT_UPDATE_OLD_BUDGET (int): Expired budgets cannot be edited after a sufficient amount of time has
          passed.
          CANNOT_END_IN_PAST (int): It is not permissible to propose a new budget that ends in the past.
          CANNOT_EXTEND_END_TIME (int): An expired budget cannot be extended to overlap with the running budget.
          PURCHASE_ORDER_NUMBER_REQUIRED (int): A purchase order number is required.
          PENDING_UPDATE_PROPOSAL_EXISTS (int): Budgets that have a pending update cannot be updated.
          MULTIPLE_BUDGETS_NOT_ALLOWED_FOR_UNAPPROVED_BILLING_SETUP (int): Cannot propose more than one budget when the corresponding billing setup
          hasn't been approved.
          CANNOT_UPDATE_START_TIME_FOR_STARTED_BUDGET (int): Cannot update the start time of a budget that has already started.
          SPENDING_LIMIT_LOWER_THAN_ACCRUED_COST_NOT_ALLOWED (int): Cannot update the spending limit of a budget with an amount lower than
          what has already been spent.
          UPDATE_IS_NO_OP (int): Cannot propose a budget update without actually changing any fields.
          END_TIME_MUST_FOLLOW_START_TIME (int): The end time must come after the start time.
          BUDGET_DATE_RANGE_INCOMPATIBLE_WITH_BILLING_SETUP (int): The budget's date range must fall within the date range of its billing
          setup.
          NOT_AUTHORIZED (int): The user is not authorized to mutate budgets for the given billing setup.
          INVALID_BILLING_SETUP (int): Mutates are not allowed for the given billing setup.
        """

        UNSPECIFIED = 0
        UNKNOWN = 1
        FIELD_MASK_NOT_ALLOWED = 2
        IMMUTABLE_FIELD = 3
        REQUIRED_FIELD_MISSING = 4
        CANNOT_CANCEL_APPROVED_PROPOSAL = 5
        CANNOT_REMOVE_UNAPPROVED_BUDGET = 6
        CANNOT_REMOVE_RUNNING_BUDGET = 7
        CANNOT_END_UNAPPROVED_BUDGET = 8
        CANNOT_END_INACTIVE_BUDGET = 9
        BUDGET_NAME_REQUIRED = 10
        CANNOT_UPDATE_OLD_BUDGET = 11
        CANNOT_END_IN_PAST = 12
        CANNOT_EXTEND_END_TIME = 13
        PURCHASE_ORDER_NUMBER_REQUIRED = 14
        PENDING_UPDATE_PROPOSAL_EXISTS = 15
        MULTIPLE_BUDGETS_NOT_ALLOWED_FOR_UNAPPROVED_BILLING_SETUP = 16
        CANNOT_UPDATE_START_TIME_FOR_STARTED_BUDGET = 17
        SPENDING_LIMIT_LOWER_THAN_ACCRUED_COST_NOT_ALLOWED = 18
        UPDATE_IS_NO_OP = 19
        END_TIME_MUST_FOLLOW_START_TIME = 20
        BUDGET_DATE_RANGE_INCOMPATIBLE_WITH_BILLING_SETUP = 21
        NOT_AUTHORIZED = 22
        INVALID_BILLING_SETUP = 23
class AccountBudgetProposalStatusEnum(object):
    # Namespacing wrapper matching the protocol buffer enum layout
    # (see module docstring); the inner IntEnum carries the values.
    class AccountBudgetProposalStatus(enum.IntEnum):
        """
        The possible statuses of an AccountBudgetProposal.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          PENDING (int): The proposal is pending approval.
          APPROVED_HELD (int): The proposal has been approved but the corresponding billing setup
          has not. This can occur for proposals that set up the first budget
          when signing up for billing or when performing a change of bill-to
          operation.
          APPROVED (int): The proposal has been approved.
          CANCELLED (int): The proposal has been cancelled by the user.
          REJECTED (int): The proposal has been rejected by the user, e.g. by rejecting an
          acceptance email.
        """

        UNSPECIFIED = 0
        UNKNOWN = 1
        PENDING = 2
        APPROVED_HELD = 3
        APPROVED = 4
        CANCELLED = 5
        REJECTED = 6
class AccountBudgetProposalTypeEnum(object):
    # Namespacing wrapper matching the protocol buffer enum layout
    # (see module docstring); the inner IntEnum carries the values.
    class AccountBudgetProposalType(enum.IntEnum):
        """
        The possible types of an AccountBudgetProposal.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          CREATE (int): Identifies a request to create a new budget.
          UPDATE (int): Identifies a request to edit an existing budget.
          END (int): Identifies a request to end a budget that has already started.
          REMOVE (int): Identifies a request to remove a budget that hasn't started yet.
        """

        UNSPECIFIED = 0
        UNKNOWN = 1
        CREATE = 2
        UPDATE = 3
        END = 4
        REMOVE = 5
class AccountBudgetStatusEnum(object):
    # Namespacing wrapper matching the protocol buffer enum layout
    # (see module docstring); the inner IntEnum carries the values.
    class AccountBudgetStatus(enum.IntEnum):
        """
        The possible statuses of an AccountBudget.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          PENDING (int): The account budget is pending approval.
          APPROVED (int): The account budget has been approved.
          CANCELLED (int): The account budget has been cancelled by the user.
        """

        UNSPECIFIED = 0
        UNKNOWN = 1
        PENDING = 2
        APPROVED = 3
        CANCELLED = 4
class AdCustomizerErrorEnum(object):
    # Namespacing wrapper matching the protocol buffer enum layout
    # (see module docstring); the inner IntEnum carries the values.
    class AdCustomizerError(enum.IntEnum):
        """
        Enum describing possible ad customizer errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          COUNTDOWN_INVALID_DATE_FORMAT (int): Invalid date argument in countdown function.
          COUNTDOWN_DATE_IN_PAST (int): Countdown end date is in the past.
          COUNTDOWN_INVALID_LOCALE (int): Invalid locale string in countdown function.
          COUNTDOWN_INVALID_START_DAYS_BEFORE (int): Days-before argument to countdown function is not positive.
          UNKNOWN_USER_LIST (int): A user list referenced in an IF function does not exist.
        """

        UNSPECIFIED = 0
        UNKNOWN = 1
        COUNTDOWN_INVALID_DATE_FORMAT = 2
        COUNTDOWN_DATE_IN_PAST = 3
        COUNTDOWN_INVALID_LOCALE = 4
        COUNTDOWN_INVALID_START_DAYS_BEFORE = 5
        UNKNOWN_USER_LIST = 6
class AdCustomizerPlaceholderFieldEnum(object):
    # Namespacing wrapper matching the protocol buffer enum layout
    # (see module docstring); the inner IntEnum carries the values.
    class AdCustomizerPlaceholderField(enum.IntEnum):
        """
        Possible values for Ad Customizers placeholder fields.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          INTEGER (int): Data Type: INT64. Integer value to be inserted.
          PRICE (int): Data Type: STRING. Price value to be inserted.
          DATE (int): Data Type: DATE\_TIME. Date value to be inserted.
          STRING (int): Data Type: STRING. String value to be inserted.
        """

        UNSPECIFIED = 0
        UNKNOWN = 1
        INTEGER = 2
        PRICE = 3
        DATE = 4
        STRING = 5
class AdErrorEnum(object):
class AdError(enum.IntEnum):
"""
Enum describing possible ad errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
AD_CUSTOMIZERS_NOT_SUPPORTED_FOR_AD_TYPE (int): Ad customizers are not supported for ad type.
APPROXIMATELY_TOO_LONG (int): Estimating character sizes the string is too long.
APPROXIMATELY_TOO_SHORT (int): Estimating character sizes the string is too short.
BAD_SNIPPET (int): There is a problem with the snippet.
CANNOT_MODIFY_AD (int): Cannot modify an ad.
CANNOT_SET_BUSINESS_NAME_IF_URL_SET (int): business name and url cannot be set at the same time
CANNOT_SET_FIELD (int): The specified field is incompatible with this ad's type or settings.
CANNOT_SET_FIELD_WITH_ORIGIN_AD_ID_SET (int): Cannot set field when originAdId is set.
CANNOT_SET_FIELD_WITH_AD_ID_SET_FOR_SHARING (int): Cannot set field when an existing ad id is set for sharing.
CANNOT_SET_ALLOW_FLEXIBLE_COLOR_FALSE (int): Cannot set allowFlexibleColor false if no color is provided by user.
CANNOT_SET_COLOR_CONTROL_WHEN_NATIVE_FORMAT_SETTING (int): When user select native, no color control is allowed because we will
always respect publisher color for native format serving.
CANNOT_SET_URL (int): Cannot specify a url for the ad type
CANNOT_SET_WITHOUT_FINAL_URLS (int): Cannot specify a tracking or mobile url without also setting final urls
CANNOT_SET_WITH_FINAL_URLS (int): Cannot specify a legacy url and a final url simultaneously
CANNOT_SET_WITH_URL_DATA (int): Cannot specify a urls in UrlData and in template fields simultaneously.
CANNOT_USE_AD_SUBCLASS_FOR_OPERATOR (int): This operator cannot be used with a subclass of Ad.
CUSTOMER_NOT_APPROVED_MOBILEADS (int): Customer is not approved for mobile ads.
CUSTOMER_NOT_APPROVED_THIRDPARTY_ADS (int): Customer is not approved for 3PAS richmedia ads.
CUSTOMER_NOT_APPROVED_THIRDPARTY_REDIRECT_ADS (int): Customer is not approved for 3PAS redirect richmedia (Ad Exchange) ads.
CUSTOMER_NOT_ELIGIBLE (int): Not an eligible customer
CUSTOMER_NOT_ELIGIBLE_FOR_UPDATING_BEACON_URL (int): Customer is not eligible for updating beacon url
DIMENSION_ALREADY_IN_UNION (int): There already exists an ad with the same dimensions in the union.
DIMENSION_MUST_BE_SET (int): Ad's dimension must be set before setting union dimension.
DIMENSION_NOT_IN_UNION (int): Ad's dimension must be included in the union dimensions.
DISPLAY_URL_CANNOT_BE_SPECIFIED (int): Display Url cannot be specified (applies to Ad Exchange Ads)
DOMESTIC_PHONE_NUMBER_FORMAT (int): Telephone number contains invalid characters or invalid format. Please
re-enter your number using digits (0-9), dashes (-), and parentheses
only.
EMERGENCY_PHONE_NUMBER (int): Emergency telephone numbers are not allowed. Please enter a valid
domestic phone number to connect customers to your business.
EMPTY_FIELD (int): A required field was not specified or is an empty string.
FEED_ATTRIBUTE_MUST_HAVE_MAPPING_FOR_TYPE_ID (int): A feed attribute referenced in an ad customizer tag is not in the ad
customizer mapping for the feed.
FEED_ATTRIBUTE_MAPPING_TYPE_MISMATCH (int): The ad customizer field mapping for the feed attribute does not match the
expected field type.
ILLEGAL_AD_CUSTOMIZER_TAG_USE (int): The use of ad customizer tags in the ad text is disallowed. Details in
trigger.
ILLEGAL_TAG_USE (int): Tags of the form {PH\_x}, where x is a number, are disallowed in ad
text.
INCONSISTENT_DIMENSIONS (int): The dimensions of the ad are specified or derived in multiple ways and
are not consistent.
INCONSISTENT_STATUS_IN_TEMPLATE_UNION (int): The status cannot differ among template ads of the same union.
INCORRECT_LENGTH (int): The length of the string is not valid.
INELIGIBLE_FOR_UPGRADE (int): The ad is ineligible for upgrade.
INVALID_AD_ADDRESS_CAMPAIGN_TARGET (int): User cannot create mobile ad for countries targeted in specified
campaign.
INVALID_AD_TYPE (int): Invalid Ad type. A specific type of Ad is required.
INVALID_ATTRIBUTES_FOR_MOBILE_IMAGE (int): Headline, description or phone cannot be present when creating mobile
image ad.
INVALID_ATTRIBUTES_FOR_MOBILE_TEXT (int): Image cannot be present when creating mobile text ad.
INVALID_CALL_TO_ACTION_TEXT (int): Invalid call to action text.
INVALID_CHARACTER_FOR_URL (int): Invalid character in URL.
INVALID_COUNTRY_CODE (int): Creative's country code is not valid.
INVALID_EXPANDED_DYNAMIC_SEARCH_AD_TAG (int): Invalid use of Expanded Dynamic Search Ads tags ({lpurl} etc.)
INVALID_INPUT (int): An input error whose real reason was not properly mapped (should not
happen).
INVALID_MARKUP_LANGUAGE (int): An invalid markup language was entered.
INVALID_MOBILE_CARRIER (int): An invalid mobile carrier was entered.
INVALID_MOBILE_CARRIER_TARGET (int): Specified mobile carriers target a country not targeted by the campaign.
INVALID_NUMBER_OF_ELEMENTS (int): Wrong number of elements for given element type
INVALID_PHONE_NUMBER_FORMAT (int): The format of the telephone number is incorrect. Please re-enter the
number using the correct format.
INVALID_RICH_MEDIA_CERTIFIED_VENDOR_FORMAT_ID (int): The certified vendor format id is incorrect.
INVALID_TEMPLATE_DATA (int): The template ad data contains validation errors.
INVALID_TEMPLATE_ELEMENT_FIELD_TYPE (int): The template field doesn't have have the correct type.
INVALID_TEMPLATE_ID (int): Invalid template id.
LINE_TOO_WIDE (int): After substituting replacement strings, the line is too wide.
MISSING_AD_CUSTOMIZER_MAPPING (int): The feed referenced must have ad customizer mapping to be used in a
customizer tag.
MISSING_ADDRESS_COMPONENT (int): Missing address component in template element address field.
MISSING_ADVERTISEMENT_NAME (int): An ad name must be entered.
MISSING_BUSINESS_NAME (int): Business name must be entered.
MISSING_DESCRIPTION1 (int): Description (line 2) must be entered.
MISSING_DESCRIPTION2 (int): Description (line 3) must be entered.
MISSING_DESTINATION_URL_TAG (int): The destination url must contain at least one tag (e.g. {lpurl})
MISSING_LANDING_PAGE_URL_TAG (int): The tracking url template of ExpandedDynamicSearchAd must contain at
least one tag. (e.g. {lpurl})
MISSING_DIMENSION (int): A valid dimension must be specified for this ad.
MISSING_DISPLAY_URL (int): A display URL must be entered.
MISSING_HEADLINE (int): Headline must be entered.
MISSING_HEIGHT (int): A height must be entered.
MISSING_IMAGE (int): An image must be entered.
MISSING_MARKETING_IMAGE_OR_PRODUCT_VIDEOS (int): Marketing image or product videos are required.
MISSING_MARKUP_LANGUAGES (int): The markup language in which your site is written must be entered.
MISSING_MOBILE_CARRIER (int): A mobile carrier must be entered.
MISSING_PHONE (int): Phone number must be entered.
MISSING_REQUIRED_TEMPLATE_FIELDS (int): Missing required template fields
MISSING_TEMPLATE_FIELD_VALUE (int): Missing a required field value
MISSING_TEXT (int): The ad must have text.
MISSING_VISIBLE_URL (int): A visible URL must be entered.
MISSING_WIDTH (int): A width must be entered.
MULTIPLE_DISTINCT_FEEDS_UNSUPPORTED (int): Only 1 feed can be used as the source of ad customizer substitutions in a
single ad.
MUST_USE_TEMP_AD_UNION_ID_ON_ADD (int): TempAdUnionId must be use when adding template ads.
TOO_LONG (int): The string has too many characters.
TOO_SHORT (int): The string has too few characters.
UNION_DIMENSIONS_CANNOT_CHANGE (int): Ad union dimensions cannot change for saved ads.
UNKNOWN_ADDRESS_COMPONENT (int): Address component is not {country, lat, lng}.
UNKNOWN_FIELD_NAME (int): Unknown unique field name
UNKNOWN_UNIQUE_NAME (int): Unknown unique name (template element type specifier)
UNSUPPORTED_DIMENSIONS (int): Unsupported ad dimension
URL_INVALID_SCHEME (int): URL starts with an invalid scheme.
URL_INVALID_TOP_LEVEL_DOMAIN (int): URL ends with an invalid top-level domain name.
URL_MALFORMED (int): URL contains illegal characters.
URL_NO_HOST (int): URL must contain a host name.
URL_NOT_EQUIVALENT (int): URL not equivalent during upgrade.
URL_HOST_NAME_TOO_LONG (int): URL host name too long to be stored as visible URL (applies to Ad
Exchange ads)
URL_NO_SCHEME (int): URL must start with a scheme.
URL_NO_TOP_LEVEL_DOMAIN (int): URL should end in a valid domain extension, such as .com or .net.
URL_PATH_NOT_ALLOWED (int): URL must not end with a path.
URL_PORT_NOT_ALLOWED (int): URL must not specify a port.
URL_QUERY_NOT_ALLOWED (int): URL must not contain a query.
URL_SCHEME_BEFORE_EXPANDED_DYNAMIC_SEARCH_AD_TAG (int): A url scheme is not allowed in front of tag in tracking url template
(e.g. http://{lpurl})
USER_DOES_NOT_HAVE_ACCESS_TO_TEMPLATE (int): The user does not have permissions to create a template ad for the given
template.
INCONSISTENT_EXPANDABLE_SETTINGS (int): Expandable setting is inconsistent/wrong. For example, an AdX ad is
invalid if it has a expandable vendor format but no expanding directions
specified, or expanding directions is specified, but the vendor format is
not expandable.
INVALID_FORMAT (int): Format is invalid
INVALID_FIELD_TEXT (int): The text of this field did not match a pattern of allowed values.
ELEMENT_NOT_PRESENT (int): Template element is mising
IMAGE_ERROR (int): Error occurred during image processing
VALUE_NOT_IN_RANGE (int): The value is not within the valid range
FIELD_NOT_PRESENT (int): Template element field is not present
ADDRESS_NOT_COMPLETE (int): Address is incomplete
ADDRESS_INVALID (int): Invalid address
VIDEO_RETRIEVAL_ERROR (int): Error retrieving specified video
AUDIO_ERROR (int): Error processing audio
INVALID_YOUTUBE_DISPLAY_URL (int): Display URL is incorrect for YouTube PYV ads
TOO_MANY_PRODUCT_IMAGES (int): Too many product Images in GmailAd
TOO_MANY_PRODUCT_VIDEOS (int): Too many product Videos in GmailAd
INCOMPATIBLE_AD_TYPE_AND_DEVICE_PREFERENCE (int): The device preference is not compatible with the ad type
CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY (int): Call tracking is not supported for specified country.
CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED (int): Carrier specific short number is not allowed.
DISALLOWED_NUMBER_TYPE (int): Specified phone number type is disallowed.
PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY (int): Phone number not supported for country.
PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY (int): Phone number not supported with call tracking enabled for country.
PREMIUM_RATE_NUMBER_NOT_ALLOWED (int): Premium rate phone number is not allowed.
VANITY_PHONE_NUMBER_NOT_ALLOWED (int): Vanity phone number is not allowed.
INVALID_CALL_CONVERSION_TYPE_ID (int): Invalid call conversion type id.
CANNOT_DISABLE_CALL_CONVERSION_AND_SET_CONVERSION_TYPE_ID (int): Cannot disable call conversion and set conversion type id.
CANNOT_SET_PATH2_WITHOUT_PATH1 (int): Cannot set path2 without path1.
MISSING_DYNAMIC_SEARCH_ADS_SETTING_DOMAIN_NAME (int): Missing domain name in campaign setting when adding expanded dynamic
search ad.
INCOMPATIBLE_WITH_RESTRICTION_TYPE (int): The associated ad is not compatible with restriction type.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AD_CUSTOMIZERS_NOT_SUPPORTED_FOR_AD_TYPE = 2
APPROXIMATELY_TOO_LONG = 3
APPROXIMATELY_TOO_SHORT = 4
BAD_SNIPPET = 5
CANNOT_MODIFY_AD = 6
CANNOT_SET_BUSINESS_NAME_IF_URL_SET = 7
CANNOT_SET_FIELD = 8
CANNOT_SET_FIELD_WITH_ORIGIN_AD_ID_SET = 9
CANNOT_SET_FIELD_WITH_AD_ID_SET_FOR_SHARING = 10
CANNOT_SET_ALLOW_FLEXIBLE_COLOR_FALSE = 11
CANNOT_SET_COLOR_CONTROL_WHEN_NATIVE_FORMAT_SETTING = 12
CANNOT_SET_URL = 13
CANNOT_SET_WITHOUT_FINAL_URLS = 14
CANNOT_SET_WITH_FINAL_URLS = 15
CANNOT_SET_WITH_URL_DATA = 17
CANNOT_USE_AD_SUBCLASS_FOR_OPERATOR = 18
CUSTOMER_NOT_APPROVED_MOBILEADS = 19
CUSTOMER_NOT_APPROVED_THIRDPARTY_ADS = 20
CUSTOMER_NOT_APPROVED_THIRDPARTY_REDIRECT_ADS = 21
CUSTOMER_NOT_ELIGIBLE = 22
CUSTOMER_NOT_ELIGIBLE_FOR_UPDATING_BEACON_URL = 23
DIMENSION_ALREADY_IN_UNION = 24
DIMENSION_MUST_BE_SET = 25
DIMENSION_NOT_IN_UNION = 26
DISPLAY_URL_CANNOT_BE_SPECIFIED = 27
DOMESTIC_PHONE_NUMBER_FORMAT = 28
EMERGENCY_PHONE_NUMBER = 29
EMPTY_FIELD = 30
FEED_ATTRIBUTE_MUST_HAVE_MAPPING_FOR_TYPE_ID = 31
FEED_ATTRIBUTE_MAPPING_TYPE_MISMATCH = 32
ILLEGAL_AD_CUSTOMIZER_TAG_USE = 33
ILLEGAL_TAG_USE = 34
INCONSISTENT_DIMENSIONS = 35
INCONSISTENT_STATUS_IN_TEMPLATE_UNION = 36
INCORRECT_LENGTH = 37
INELIGIBLE_FOR_UPGRADE = 38
INVALID_AD_ADDRESS_CAMPAIGN_TARGET = 39
INVALID_AD_TYPE = 40
INVALID_ATTRIBUTES_FOR_MOBILE_IMAGE = 41
INVALID_ATTRIBUTES_FOR_MOBILE_TEXT = 42
INVALID_CALL_TO_ACTION_TEXT = 43
INVALID_CHARACTER_FOR_URL = 44
INVALID_COUNTRY_CODE = 45
INVALID_EXPANDED_DYNAMIC_SEARCH_AD_TAG = 47
INVALID_INPUT = 48
INVALID_MARKUP_LANGUAGE = 49
INVALID_MOBILE_CARRIER = 50
INVALID_MOBILE_CARRIER_TARGET = 51
INVALID_NUMBER_OF_ELEMENTS = 52
INVALID_PHONE_NUMBER_FORMAT = 53
INVALID_RICH_MEDIA_CERTIFIED_VENDOR_FORMAT_ID = 54
INVALID_TEMPLATE_DATA = 55
INVALID_TEMPLATE_ELEMENT_FIELD_TYPE = 56
INVALID_TEMPLATE_ID = 57
LINE_TOO_WIDE = 58
MISSING_AD_CUSTOMIZER_MAPPING = 59
MISSING_ADDRESS_COMPONENT = 60
MISSING_ADVERTISEMENT_NAME = 61
MISSING_BUSINESS_NAME = 62
MISSING_DESCRIPTION1 = 63
MISSING_DESCRIPTION2 = 64
MISSING_DESTINATION_URL_TAG = 65
MISSING_LANDING_PAGE_URL_TAG = 66
MISSING_DIMENSION = 67
MISSING_DISPLAY_URL = 68
MISSING_HEADLINE = 69
MISSING_HEIGHT = 70
MISSING_IMAGE = 71
MISSING_MARKETING_IMAGE_OR_PRODUCT_VIDEOS = 72
MISSING_MARKUP_LANGUAGES = 73
MISSING_MOBILE_CARRIER = 74
MISSING_PHONE = 75
MISSING_REQUIRED_TEMPLATE_FIELDS = 76
MISSING_TEMPLATE_FIELD_VALUE = 77
MISSING_TEXT = 78
MISSING_VISIBLE_URL = 79
MISSING_WIDTH = 80
MULTIPLE_DISTINCT_FEEDS_UNSUPPORTED = 81
MUST_USE_TEMP_AD_UNION_ID_ON_ADD = 82
TOO_LONG = 83
TOO_SHORT = 84
UNION_DIMENSIONS_CANNOT_CHANGE = 85
UNKNOWN_ADDRESS_COMPONENT = 86
UNKNOWN_FIELD_NAME = 87
UNKNOWN_UNIQUE_NAME = 88
UNSUPPORTED_DIMENSIONS = 89
URL_INVALID_SCHEME = 90
URL_INVALID_TOP_LEVEL_DOMAIN = 91
URL_MALFORMED = 92
URL_NO_HOST = 93
URL_NOT_EQUIVALENT = 94
URL_HOST_NAME_TOO_LONG = 95
URL_NO_SCHEME = 96
URL_NO_TOP_LEVEL_DOMAIN = 97
URL_PATH_NOT_ALLOWED = 98
URL_PORT_NOT_ALLOWED = 99
URL_QUERY_NOT_ALLOWED = 100
URL_SCHEME_BEFORE_EXPANDED_DYNAMIC_SEARCH_AD_TAG = 102
USER_DOES_NOT_HAVE_ACCESS_TO_TEMPLATE = 103
INCONSISTENT_EXPANDABLE_SETTINGS = 104
INVALID_FORMAT = 105
INVALID_FIELD_TEXT = 106
ELEMENT_NOT_PRESENT = 107
IMAGE_ERROR = 108
VALUE_NOT_IN_RANGE = 109
FIELD_NOT_PRESENT = 110
ADDRESS_NOT_COMPLETE = 111
ADDRESS_INVALID = 112
VIDEO_RETRIEVAL_ERROR = 113
AUDIO_ERROR = 114
INVALID_YOUTUBE_DISPLAY_URL = 115
TOO_MANY_PRODUCT_IMAGES = 116
TOO_MANY_PRODUCT_VIDEOS = 117
INCOMPATIBLE_AD_TYPE_AND_DEVICE_PREFERENCE = 118
CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 119
CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED = 120
DISALLOWED_NUMBER_TYPE = 121
PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY = 122
PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY = 123
PREMIUM_RATE_NUMBER_NOT_ALLOWED = 124
VANITY_PHONE_NUMBER_NOT_ALLOWED = 125
INVALID_CALL_CONVERSION_TYPE_ID = 126
CANNOT_DISABLE_CALL_CONVERSION_AND_SET_CONVERSION_TYPE_ID = 127
CANNOT_SET_PATH2_WITHOUT_PATH1 = 128
MISSING_DYNAMIC_SEARCH_ADS_SETTING_DOMAIN_NAME = 129
INCOMPATIBLE_WITH_RESTRICTION_TYPE = 130
class AdGroupAdErrorEnum(object):
    """Container for the AdGroupAdError enum."""

    class AdGroupAdError(enum.IntEnum):
        """Errors that can occur when mutating ad group ads.

        Numeric values mirror the Google Ads API AdGroupAdError proto enum.
        """
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # Error code not known in this API version.
        AD_GROUP_AD_LABEL_DOES_NOT_EXIST = 2  # No link between the ad group ad and the label.
        AD_GROUP_AD_LABEL_ALREADY_EXISTS = 3  # Label already attached to the ad group ad.
        AD_NOT_UNDER_ADGROUP = 4  # Ad was not found in the ad group.
        CANNOT_OPERATE_ON_REMOVED_ADGROUPAD = 5  # Removed ads may not be modified.
        CANNOT_CREATE_DEPRECATED_ADS = 6  # Deprecated ad type; only deletions permitted.
        CANNOT_CREATE_TEXT_ADS = 7  # Text ads deprecated; use expanded text ads instead.
        EMPTY_FIELD = 8  # Required field missing or empty string.
        RESOURCE_REFERENCED_IN_MULTIPLE_OPS = 9  # An ad may only be modified once per call.
class AdGroupAdRotationModeEnum(object):
    """Container for the AdGroupAdRotationMode enum."""

    class AdGroupAdRotationMode(enum.IntEnum):
        """Ad rotation modes available for an ad group."""
        UNSPECIFIED = 0  # Rotation mode not specified.
        UNKNOWN = 1  # Response-only: value unknown in this version.
        OPTIMIZE = 2  # Optimize ads based on clicks or conversions.
        ROTATE_FOREVER = 3  # Rotate ads evenly forever.
class AdGroupAdStatusEnum(object):
    """Container for the AdGroupAdStatus enum."""

    class AdGroupAdStatus(enum.IntEnum):
        """Possible statuses of an AdGroupAd."""
        UNSPECIFIED = 0  # No value specified.
        UNKNOWN = 1  # Response-only: value unknown in this version.
        ENABLED = 2  # The ad group ad is enabled.
        PAUSED = 3  # The ad group ad is paused.
        REMOVED = 4  # The ad group ad is removed.
class AdGroupBidModifierErrorEnum(object):
    """Container for the AdGroupBidModifierError enum."""

    class AdGroupBidModifierError(enum.IntEnum):
        """Errors that can occur when mutating ad group bid modifiers."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # Error code not known in this API version.
        CRITERION_ID_NOT_SUPPORTED = 2  # Criterion ID does not support bid modification.
        # Cannot override the bid modifier when the parent campaign opted out
        # of the same criterion.
        CANNOT_OVERRIDE_OPTED_OUT_CAMPAIGN_CRITERION_BID_MODIFIER = 3
class AdGroupCriterionApprovalStatusEnum(object):
    """Container for the AdGroupCriterionApprovalStatus enum."""

    class AdGroupCriterionApprovalStatus(enum.IntEnum):
        """Approval statuses of an AdGroupCriterion."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Value unknown in this version.
        APPROVED = 2  # Approved.
        DISAPPROVED = 3  # Disapproved.
        PENDING_REVIEW = 4  # Pending review.
        UNDER_REVIEW = 5  # Under review.
class AdGroupCriterionErrorEnum(object):
    """Container for the AdGroupCriterionError enum."""

    class AdGroupCriterionError(enum.IntEnum):
        """Errors that can occur when mutating ad group criteria.

        Values mirror the Google Ads API proto enum; the gap between 17 and
        27 is present in the upstream definition and is preserved here.
        """
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # Error code not known in this API version.
        AD_GROUP_CRITERION_LABEL_DOES_NOT_EXIST = 2  # No link between criterion and label.
        AD_GROUP_CRITERION_LABEL_ALREADY_EXISTS = 3  # Label already attached.
        CANNOT_ADD_LABEL_TO_NEGATIVE_CRITERION = 4  # Negative criteria cannot have labels.
        TOO_MANY_OPERATIONS = 5  # Too many operations for a single call.
        CANT_UPDATE_NEGATIVE = 6  # Negative criteria are not updateable.
        CONCRETE_TYPE_REQUIRED = 7  # Concrete criterion type required for ADD/SET.
        BID_INCOMPATIBLE_WITH_ADGROUP = 8  # Bid incompatible with ad group bidding settings.
        CANNOT_TARGET_AND_EXCLUDE = 9  # Cannot target and exclude the same criterion.
        ILLEGAL_URL = 10  # Placement URL is invalid.
        INVALID_KEYWORD_TEXT = 11  # Keyword text was invalid.
        INVALID_DESTINATION_URL = 12  # Destination URL was invalid.
        MISSING_DESTINATION_URL_TAG = 13  # Destination URL must contain a tag (e.g. {lpurl}).
        KEYWORD_LEVEL_BID_NOT_SUPPORTED_FOR_MANUALCPM = 14  # Keyword-level CPM bid unsupported.
        INVALID_USER_STATUS = 15  # e.g. cannot add a removed biddable criterion.
        CANNOT_ADD_CRITERIA_TYPE = 16  # Criteria type cannot be targeted for the ad group.
        CANNOT_EXCLUDE_CRITERIA_TYPE = 17  # Criteria type cannot be excluded for the ad group.
        CAMPAIGN_TYPE_NOT_COMPATIBLE_WITH_PARTIAL_FAILURE = 27  # Shopping mutates: no partial failure.
        OPERATIONS_FOR_TOO_MANY_SHOPPING_ADGROUPS = 28  # Too many shopping ad groups changed.
        CANNOT_MODIFY_URL_FIELDS_WITH_DUPLICATE_ELEMENTS = 29  # Duplicate elements in request.
        CANNOT_SET_WITHOUT_FINAL_URLS = 30  # URL fields require final urls.
        CANNOT_CLEAR_FINAL_URLS_IF_FINAL_MOBILE_URLS_EXIST = 31
        CANNOT_CLEAR_FINAL_URLS_IF_FINAL_APP_URLS_EXIST = 32
        CANNOT_CLEAR_FINAL_URLS_IF_TRACKING_URL_TEMPLATE_EXISTS = 33
        CANNOT_CLEAR_FINAL_URLS_IF_URL_CUSTOM_PARAMETERS_EXIST = 34
        CANNOT_SET_BOTH_DESTINATION_URL_AND_FINAL_URLS = 35
        CANNOT_SET_BOTH_DESTINATION_URL_AND_TRACKING_URL_TEMPLATE = 36
        FINAL_URLS_NOT_SUPPORTED_FOR_CRITERION_TYPE = 37
        FINAL_MOBILE_URLS_NOT_SUPPORTED_FOR_CRITERION_TYPE = 38
        INVALID_LISTING_GROUP_HIERARCHY = 39  # Invalid due to contained listing groups.
        LISTING_GROUP_UNIT_CANNOT_HAVE_CHILDREN = 40
        LISTING_GROUP_SUBDIVISION_REQUIRES_OTHERS_CASE = 41  # Subdivisions need an "others" case.
        LISTING_GROUP_REQUIRES_SAME_DIMENSION_TYPE_AS_SIBLINGS = 42
        LISTING_GROUP_ALREADY_EXISTS = 43
        LISTING_GROUP_DOES_NOT_EXIST = 44
        LISTING_GROUP_CANNOT_BE_REMOVED = 45  # Recursive removal conflicted with this request.
        INVALID_LISTING_GROUP_TYPE = 46
        LISTING_GROUP_ADD_MAY_ONLY_USE_TEMP_ID = 47  # ADD must use a temporary criterion id.
class AdGroupCriterionStatusEnum(object):
    """Container for the AdGroupCriterionStatus enum."""

    class AdGroupCriterionStatus(enum.IntEnum):
        """Possible statuses of an AdGroupCriterion."""
        UNSPECIFIED = 0  # No value specified.
        UNKNOWN = 1  # Response-only: value unknown in this version.
        ENABLED = 2  # The ad group criterion is enabled.
        PAUSED = 3  # The ad group criterion is paused.
        REMOVED = 4  # The ad group criterion is removed.
class AdGroupErrorEnum(object):
    """Container for the AdGroupError enum."""

    class AdGroupError(enum.IntEnum):
        """Errors that can occur when mutating ad groups.

        Value 4 is intentionally absent, matching the upstream proto enum.
        """
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # Error code not known in this API version.
        DUPLICATE_ADGROUP_NAME = 2  # Ad group name already exists in the campaign.
        INVALID_ADGROUP_NAME = 3  # Ad group name is not valid.
        ADVERTISER_NOT_ON_CONTENT_NETWORK = 5  # Cannot target non-Search-Network sites.
        BID_TOO_BIG = 6  # Bid amount is too big.
        BID_TYPE_AND_BIDDING_STRATEGY_MISMATCH = 7  # Bid doesn't match campaign bidding strategy.
        MISSING_ADGROUP_NAME = 8  # Name is required for Add.
        ADGROUP_LABEL_DOES_NOT_EXIST = 9  # No link between the ad group and the label.
        ADGROUP_LABEL_ALREADY_EXISTS = 10  # Label already attached to the ad group.
        INVALID_CONTENT_BID_CRITERION_TYPE_GROUP = 11  # Type group unsupported for content bids.
        AD_GROUP_TYPE_NOT_VALID_FOR_ADVERTISING_CHANNEL_TYPE = 12  # Incompatible channel type.
        ADGROUP_TYPE_NOT_SUPPORTED_FOR_CAMPAIGN_SALES_COUNTRY = 13  # Unsupported in sale country.
        # SEARCH_DYNAMIC_ADS ad groups require a DynamicSearchAdsSetting on the campaign.
        CANNOT_ADD_ADGROUP_OF_TYPE_DSA_TO_CAMPAIGN_WITHOUT_DSA_SETTING = 14
class AdGroupFeedErrorEnum(object):
    """Container for the AdGroupFeedError enum."""

    class AdGroupFeedError(enum.IntEnum):
        """Errors that can occur when mutating ad group feeds."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # Error code not known in this API version.
        FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 2  # Active feed exists for this type.
        CANNOT_CREATE_FOR_REMOVED_FEED = 3  # The specified feed is removed.
        ADGROUP_FEED_ALREADY_EXISTS = 4  # Use UPDATE to modify the existing AdGroupFeed.
        CANNOT_OPERATE_ON_REMOVED_ADGROUP_FEED = 5  # AdGroupFeed is removed.
        INVALID_PLACEHOLDER_TYPE = 6  # Invalid placeholder type.
        MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE = 7  # No feed mapping for placeholder type.
        NO_EXISTING_LOCATION_CUSTOMER_FEED = 8  # Requires a location CustomerFeed first.
class AdGroupStatusEnum(object):
    """Container for the AdGroupStatus enum."""

    class AdGroupStatus(enum.IntEnum):
        """Possible statuses of an ad group."""
        UNSPECIFIED = 0  # Status not specified.
        UNKNOWN = 1  # Response-only: value unknown in this version.
        ENABLED = 2  # The ad group is enabled.
        PAUSED = 3  # The ad group is paused.
        REMOVED = 4  # The ad group is removed.
class AdGroupTypeEnum(object):
    """Container for the AdGroupType enum."""

    class AdGroupType(enum.IntEnum):
        """Possible types of an ad group.

        Value 5 is intentionally absent, matching the upstream proto enum.
        """
        UNSPECIFIED = 0  # Type not specified.
        UNKNOWN = 1  # Response-only: value unknown in this version.
        SEARCH_STANDARD = 2  # Default type for Search campaigns.
        DISPLAY_STANDARD = 3  # Default type for Display campaigns.
        SHOPPING_PRODUCT_ADS = 4  # Shopping campaigns serving standard product ads.
        HOTEL_ADS = 6  # Default type for Hotel campaigns.
        SHOPPING_SMART_ADS = 7  # Ad groups in Smart Shopping campaigns.
        VIDEO_BUMPER = 8  # Short unskippable in-stream video ads.
        VIDEO_TRUE_VIEW_IN_STREAM = 9  # TrueView (skippable) in-stream video ads.
        VIDEO_TRUE_VIEW_IN_DISPLAY = 10  # TrueView in-display video ads.
        VIDEO_NON_SKIPPABLE_IN_STREAM = 11  # Unskippable in-stream video ads.
        VIDEO_OUTSTREAM = 12  # Outstream video ads.
        SEARCH_DYNAMIC_ADS = 13  # Dynamic Search Ads ad groups.
class AdNetworkTypeEnum(object):
    """Container for the AdNetworkType enum."""

    class AdNetworkType(enum.IntEnum):
        """Google Ads network types."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Value unknown in this version.
        SEARCH = 2  # Google search.
        SEARCH_PARTNERS = 3  # Search partners.
        CONTENT = 4  # Display Network.
        YOUTUBE_SEARCH = 5  # YouTube Search.
        YOUTUBE_WATCH = 6  # YouTube Videos.
        MIXED = 7  # Cross-network.
class AdParameterErrorEnum(object):
    """Container for the AdParameterError enum."""

    class AdParameterError(enum.IntEnum):
        """Errors that can occur when mutating ad parameters."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # Error code not known in this API version.
        AD_GROUP_CRITERION_MUST_BE_KEYWORD = 2  # Criterion must be a keyword criterion.
        INVALID_INSERTION_TEXT_FORMAT = 3  # Insertion text is invalid.
class AdServingOptimizationStatusEnum(object):
    """Container for the AdServingOptimizationStatus enum."""

    class AdServingOptimizationStatus(enum.IntEnum):
        """Possible ad serving optimization statuses."""
        UNSPECIFIED = 0  # No value specified.
        UNKNOWN = 1  # Response-only: value unknown in this version.
        OPTIMIZE = 2  # Serving optimized based on CTR.
        # Optimized on CTR * Conversion; defaults to OPTIMIZED if the campaign
        # is not using the conversion optimizer bidding strategy.
        CONVERSION_OPTIMIZE = 3
        ROTATE = 4  # Rotate evenly for 90 days, then optimize for clicks.
        ROTATE_INDEFINITELY = 5  # Show lower performers evenly; do not optimize.
        UNAVAILABLE = 6  # Optimization status not available.
class AdSharingErrorEnum(object):
    """Container for the AdSharingError enum."""

    class AdSharingError(enum.IntEnum):
        """Errors that can occur when sharing ads between ad groups."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # Error code not known in this API version.
        AD_GROUP_ALREADY_CONTAINS_AD = 2  # Ad group already contains this ad.
        INCOMPATIBLE_AD_UNDER_AD_GROUP = 3  # Ad incompatible with the target ad group.
        CANNOT_SHARE_INACTIVE_AD = 4  # Cannot add AdGroupAd on an inactive ad.
class AdStrengthEnum(object):
    """Container for the AdStrength enum."""

    class AdStrength(enum.IntEnum):
        """Possible ad strength ratings."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return-value only: unknown in this version.
        PENDING = 2  # Strength is currently pending.
        NO_ADS = 3  # No ads could be generated.
        POOR = 4  # Poor strength.
        AVERAGE = 5  # Average strength.
        GOOD = 6  # Good strength.
        EXCELLENT = 7  # Excellent strength.
class AdTypeEnum(object):
    """Container for the AdType enum."""

    class AdType(enum.IntEnum):
        """Possible types of an ad.

        Values 4, 5, and 11 are intentionally absent, matching the upstream
        proto enum.
        """
        UNSPECIFIED = 0  # No value specified.
        UNKNOWN = 1  # Response-only: value unknown in this version.
        TEXT_AD = 2  # Text ad.
        EXPANDED_TEXT_AD = 3  # Expanded text ad.
        CALL_ONLY_AD = 6  # Call only ad.
        EXPANDED_DYNAMIC_SEARCH_AD = 7  # Expanded dynamic search ad.
        HOTEL_AD = 8  # Hotel ad.
        SHOPPING_SMART_AD = 9  # Smart Shopping ad.
        SHOPPING_PRODUCT_AD = 10  # Standard Shopping ad.
        VIDEO_AD = 12  # Video ad.
        GMAIL_AD = 13  # Gmail ad.
        IMAGE_AD = 14  # Image ad.
        RESPONSIVE_SEARCH_AD = 15  # Responsive search ad.
        LEGACY_RESPONSIVE_DISPLAY_AD = 16  # Legacy responsive display ad.
        APP_AD = 17  # App ad.
        LEGACY_APP_INSTALL_AD = 18  # Legacy app install ad.
        RESPONSIVE_DISPLAY_AD = 19  # Responsive display ad.
class AdvertisingChannelSubTypeEnum(object):
    """Container for the AdvertisingChannelSubType enum."""

    class AdvertisingChannelSubType(enum.IntEnum):
        """Advertising channel subtypes."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return-value only: unknown in this version.
        SEARCH_MOBILE_APP = 2  # Mobile app campaigns for Search.
        DISPLAY_MOBILE_APP = 3  # Mobile app campaigns for Display.
        SEARCH_EXPRESS = 4  # AdWords Express campaigns for search.
        DISPLAY_EXPRESS = 5  # AdWords Express campaigns for display.
        SHOPPING_SMART_ADS = 6  # Smart Shopping campaigns.
        DISPLAY_GMAIL_AD = 7  # Gmail Ad campaigns.
        DISPLAY_SMART_CAMPAIGN = 8  # Smart display campaigns.
        VIDEO_OUTSTREAM = 9  # Video Outstream campaigns.
        VIDEO_ACTION = 10  # Video TrueView for Action campaigns.
        VIDEO_NON_SKIPPABLE = 11  # Campaigns with non-skippable video ads.
        APP_CAMPAIGN = 12  # Universal App Campaign.
class AdvertisingChannelTypeEnum(object):
    """Container for the AdvertisingChannelType enum."""

    class AdvertisingChannelType(enum.IntEnum):
        """Primary advertising channel types."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return-value only: unknown in this version.
        SEARCH = 2  # Search Network, including display-bundled and Search+ campaigns.
        DISPLAY = 3  # Google Display Network only.
        SHOPPING = 4  # Shopping property and google.com search results.
        HOTEL = 5  # Hotel Ads campaigns.
        VIDEO = 6  # Video campaigns.
        MULTI_CHANNEL = 7  # Universal App Campaigns running across multiple channels.
class AdxErrorEnum(object):
    """Container for the AdxError enum."""

    class AdxError(enum.IntEnum):
        """Errors specific to Ad Exchange (AdX) customers."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # Error code not known in this API version.
        UNSUPPORTED_FEATURE = 2  # Non-AdX feature used by an AdX customer.
class AffiliateLocationFeedRelationshipTypeEnum(object):
    """Container for the AffiliateLocationFeedRelationshipType enum."""

    class AffiliateLocationFeedRelationshipType(enum.IntEnum):
        """Relationship types for an affiliate location feed."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return-value only: unknown in this version.
        GENERAL_RETAILER = 2  # General retailer relationship.
class AffiliateLocationPlaceholderFieldEnum(object):
    """Container for the AffiliateLocationPlaceholderField enum."""

    class AffiliateLocationPlaceholderField(enum.IntEnum):
        """Affiliate Location placeholder fields (all STRING unless noted)."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return-value only: unknown in this version.
        BUSINESS_NAME = 2  # Name of the business.
        ADDRESS_LINE_1 = 3  # Line 1 of the business address.
        ADDRESS_LINE_2 = 4  # Line 2 of the business address.
        CITY = 5  # City of the business address.
        PROVINCE = 6  # Province of the business address.
        POSTAL_CODE = 7  # Postal code of the business address.
        COUNTRY_CODE = 8  # Country code of the business address.
        PHONE_NUMBER = 9  # Phone number of the business.
        LANGUAGE_CODE = 10  # Language code of the business.
        CHAIN_ID = 11  # INT64: ID of the chain.
        CHAIN_NAME = 12  # Name of the chain.
class AgeRangeTypeEnum(object):
    """Container for the AgeRangeType enum."""

    class AgeRangeType(enum.IntEnum):
        """Demographic age-range criterion types.

        The large values (503xxx) are the Google Ads criterion IDs for age
        ranges and are preserved exactly.
        """
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return-value only: unknown in this version.
        AGE_RANGE_18_24 = 503001  # Between 18 and 24 years old.
        AGE_RANGE_25_34 = 503002  # Between 25 and 34 years old.
        AGE_RANGE_35_44 = 503003  # Between 35 and 44 years old.
        AGE_RANGE_45_54 = 503004  # Between 45 and 54 years old.
        AGE_RANGE_55_64 = 503005  # Between 55 and 64 years old.
        AGE_RANGE_65_UP = 503006  # 65 years old and beyond.
        AGE_RANGE_UNDETERMINED = 503999  # Undetermined age range.
class AppCampaignAppStoreEnum(object):
    """Container for the AppCampaignAppStore enum."""

    class AppCampaignAppStore(enum.IntEnum):
        """App stores targetable by an app campaign."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return-value only: unknown in this version.
        APPLE_APP_STORE = 2  # Apple app store.
        GOOGLE_APP_STORE = 3  # Google Play.
class AppCampaignBiddingStrategyGoalTypeEnum(object):
    """Container for the AppCampaignBiddingStrategyGoalType enum."""

    class AppCampaignBiddingStrategyGoalType(enum.IntEnum):
        """Goal types for an App campaign bidding strategy."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return-value only: unknown in this version.
        # Maximize installs; CPA bid is the target cost per install.
        OPTIMIZE_INSTALLS_TARGET_INSTALL_COST = 2
        # Maximize selected in-app conversions; CPA bid is target cost per install.
        OPTIMIZE_IN_APP_CONVERSIONS_TARGET_INSTALL_COST = 3
        # Maximize selected in-app conversions; CPA bid is target cost per
        # in-app conversion (actual CPA may initially exceed target).
        OPTIMIZE_IN_APP_CONVERSIONS_TARGET_CONVERSION_COST = 4
        # Maximize all conversion value while meeting target return on ad spend.
        OPTIMIZE_RETURN_ON_ADVERTISING_SPEND = 5
class AppPaymentModelTypeEnum(object):
    """Container class for the AppPaymentModelType enum."""

    class AppPaymentModelType(enum.IntEnum):
        """
        Enum describing possible app payment models.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          PAID (int): Represents paid-for apps.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        PAID = 30
class AppPlaceholderFieldEnum(object):
    """Container class for the AppPlaceholderField enum."""

    class AppPlaceholderField(enum.IntEnum):
        """
        Possible values for App placeholder fields.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          STORE (int): Data Type: INT64. The application store that the target application
          belongs to. Valid values are: 1 = Apple iTunes Store; 2 = Google Play
          Store.
          ID (int): Data Type: STRING. The store-specific ID for the target application.
          LINK_TEXT (int): Data Type: STRING. The visible text displayed when the link is rendered
          in an ad.
          URL (int): Data Type: STRING. The destination URL of the in-app link.
          FINAL_URLS (int): Data Type: URL\_LIST. Final URLs for the in-app link when using Upgraded
          URLs.
          FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final Mobile URLs for the in-app link when using
          Upgraded URLs.
          TRACKING_URL (int): Data Type: URL. Tracking template for the in-app link when using Upgraded
          URLs.
          FINAL_URL_SUFFIX (int): Data Type: STRING. Final URL suffix for the in-app link when using
          parallel tracking.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        STORE = 2
        ID = 3
        LINK_TEXT = 4
        URL = 5
        FINAL_URLS = 6
        FINAL_MOBILE_URLS = 7
        TRACKING_URL = 8
        FINAL_URL_SUFFIX = 9
class AppStoreEnum(object):
    """Container class for the AppStore enum."""

    class AppStore(enum.IntEnum):
        """
        App store type in an app extension.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          APPLE_ITUNES (int): Apple iTunes.
          GOOGLE_PLAY (int): Google Play.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        APPLE_ITUNES = 2
        GOOGLE_PLAY = 3
class AssetErrorEnum(object):
    """Container class for the AssetError enum."""

    class AssetError(enum.IntEnum):
        """
        Enum describing possible asset errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          CUSTOMER_NOT_WHITELISTED_FOR_ASSET_TYPE (int): The customer is not whitelisted for this asset type.
          DUPLICATE_ASSET (int): Assets are duplicated across operations.
          DUPLICATE_ASSET_NAME (int): The asset name is duplicated, either across operations or with an
          existing asset.
          ASSET_DATA_IS_MISSING (int): The Asset.asset\_data oneof is empty.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CUSTOMER_NOT_WHITELISTED_FOR_ASSET_TYPE = 2
        DUPLICATE_ASSET = 3
        DUPLICATE_ASSET_NAME = 4
        ASSET_DATA_IS_MISSING = 5
class AssetTypeEnum(object):
    """Container class for the AssetType enum."""

    class AssetType(enum.IntEnum):
        """
        Enum describing possible types of asset.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          YOUTUBE_VIDEO (int): YouTube video asset.
          MEDIA_BUNDLE (int): Media bundle asset.
          IMAGE (int): Image asset.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        YOUTUBE_VIDEO = 2
        MEDIA_BUNDLE = 3
        IMAGE = 4
class AttributionModelEnum(object):
    """Container class for the AttributionModel enum."""

    class AttributionModel(enum.IntEnum):
        """
        The attribution model that describes how to distribute credit for a
        particular conversion across potentially many prior interactions.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          EXTERNAL (int): Uses external attribution.
          GOOGLE_ADS_LAST_CLICK (int): Attributes all credit for a conversion to its last click.
          GOOGLE_SEARCH_ATTRIBUTION_FIRST_CLICK (int): Attributes all credit for a conversion to its first click using Google
          Search attribution.
          GOOGLE_SEARCH_ATTRIBUTION_LINEAR (int): Attributes credit for a conversion equally across all of its clicks using
          Google Search attribution.
          GOOGLE_SEARCH_ATTRIBUTION_TIME_DECAY (int): Attributes exponentially more credit for a conversion to its more recent
          clicks using Google Search attribution (half-life is 1 week).
          GOOGLE_SEARCH_ATTRIBUTION_POSITION_BASED (int): Attributes 40% of the credit for a conversion to its first and last
          clicks. Remaining 20% is evenly distributed across all other clicks. This
          uses Google Search attribution.
          GOOGLE_SEARCH_ATTRIBUTION_DATA_DRIVEN (int): Flexible model that uses machine learning to determine the appropriate
          distribution of credit among clicks using Google Search attribution.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        EXTERNAL = 100
        GOOGLE_ADS_LAST_CLICK = 101
        GOOGLE_SEARCH_ATTRIBUTION_FIRST_CLICK = 102
        GOOGLE_SEARCH_ATTRIBUTION_LINEAR = 103
        GOOGLE_SEARCH_ATTRIBUTION_TIME_DECAY = 104
        GOOGLE_SEARCH_ATTRIBUTION_POSITION_BASED = 105
        GOOGLE_SEARCH_ATTRIBUTION_DATA_DRIVEN = 106
class AuthenticationErrorEnum(object):
    """Container class for the AuthenticationError enum."""

    class AuthenticationError(enum.IntEnum):
        """
        Enum describing possible authentication errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          AUTHENTICATION_ERROR (int): Authentication of the request failed.
          CLIENT_CUSTOMER_ID_INVALID (int): Client Customer Id is not a number.
          CUSTOMER_NOT_FOUND (int): No customer found for the provided customer id.
          GOOGLE_ACCOUNT_DELETED (int): Client's Google Account is deleted.
          GOOGLE_ACCOUNT_COOKIE_INVALID (int): Google account login token in the cookie is invalid.
          GOOGLE_ACCOUNT_AUTHENTICATION_FAILED (int): A problem occurred during Google account authentication.
          GOOGLE_ACCOUNT_USER_AND_ADS_USER_MISMATCH (int): The user in the google account login token does not match the UserId in
          the cookie.
          LOGIN_COOKIE_REQUIRED (int): Login cookie is required for authentication.
          NOT_ADS_USER (int): User in the cookie is not a valid Ads user.
          OAUTH_TOKEN_INVALID (int): Oauth token in the header is not valid.
          OAUTH_TOKEN_EXPIRED (int): Oauth token in the header has expired.
          OAUTH_TOKEN_DISABLED (int): Oauth token in the header has been disabled.
          OAUTH_TOKEN_REVOKED (int): Oauth token in the header has been revoked.
          OAUTH_TOKEN_HEADER_INVALID (int): Oauth token HTTP header is malformed.
          LOGIN_COOKIE_INVALID (int): Login cookie is not valid.
          USER_ID_INVALID (int): User Id in the header is not a valid id.
          TWO_STEP_VERIFICATION_NOT_ENROLLED (int): An account administrator changed this account's authentication settings.
          To access this Google Ads account, enable 2-Step Verification in your
          Google account at https://www.google.com/landing/2step.
          ADVANCED_PROTECTION_NOT_ENROLLED (int): An account administrator changed this account's authentication settings.
          To access this Google Ads account, enable Advanced Protection in your
          Google account at https://landing.google.com/advancedprotection.
        """
        # NOTE: values are non-contiguous; they mirror the server-side proto
        # enum and must not be renumbered.
        UNSPECIFIED = 0
        UNKNOWN = 1
        AUTHENTICATION_ERROR = 2
        CLIENT_CUSTOMER_ID_INVALID = 5
        CUSTOMER_NOT_FOUND = 8
        GOOGLE_ACCOUNT_DELETED = 9
        GOOGLE_ACCOUNT_COOKIE_INVALID = 10
        GOOGLE_ACCOUNT_AUTHENTICATION_FAILED = 25
        GOOGLE_ACCOUNT_USER_AND_ADS_USER_MISMATCH = 12
        LOGIN_COOKIE_REQUIRED = 13
        NOT_ADS_USER = 14
        OAUTH_TOKEN_INVALID = 15
        OAUTH_TOKEN_EXPIRED = 16
        OAUTH_TOKEN_DISABLED = 17
        OAUTH_TOKEN_REVOKED = 18
        OAUTH_TOKEN_HEADER_INVALID = 19
        LOGIN_COOKIE_INVALID = 20
        USER_ID_INVALID = 22
        TWO_STEP_VERIFICATION_NOT_ENROLLED = 23
        ADVANCED_PROTECTION_NOT_ENROLLED = 24
class AuthorizationErrorEnum(object):
    """Container class for the AuthorizationError enum."""

    class AuthorizationError(enum.IntEnum):
        """
        Enum describing possible authorization errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          USER_PERMISSION_DENIED (int): User doesn't have permission to access customer.
          DEVELOPER_TOKEN_NOT_WHITELISTED (int): The developer token is not whitelisted.
          DEVELOPER_TOKEN_PROHIBITED (int): The developer token is not allowed with the project sent in the request.
          PROJECT_DISABLED (int): The Google Cloud project sent in the request does not have permission to
          access the api.
          AUTHORIZATION_ERROR (int): Authorization of the client failed.
          ACTION_NOT_PERMITTED (int): The user does not have permission to perform this action
          (e.g., ADD, UPDATE, REMOVE) on the resource or call a method.
          INCOMPLETE_SIGNUP (int): Signup not complete.
          CUSTOMER_NOT_ENABLED (int): The customer can't be used because it isn't enabled.
          MISSING_TOS (int): The developer must sign the terms of service. They can be found here:
          ads.google.com/aw/apicenter
          DEVELOPER_TOKEN_NOT_APPROVED (int): The developer token is not approved. Non-approved developer tokens can
          only be used with test accounts.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        USER_PERMISSION_DENIED = 2
        DEVELOPER_TOKEN_NOT_WHITELISTED = 3
        DEVELOPER_TOKEN_PROHIBITED = 4
        PROJECT_DISABLED = 5
        AUTHORIZATION_ERROR = 6
        ACTION_NOT_PERMITTED = 7
        INCOMPLETE_SIGNUP = 8
        CUSTOMER_NOT_ENABLED = 24
        MISSING_TOS = 9
        DEVELOPER_TOKEN_NOT_APPROVED = 10
class BidModifierSourceEnum(object):
    """Container class for the BidModifierSource enum."""

    class BidModifierSource(enum.IntEnum):
        """
        Enum describing possible bid modifier sources.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          CAMPAIGN (int): The bid modifier is specified at the campaign level, on the campaign
          level criterion.
          AD_GROUP (int): The bid modifier is specified (overridden) at the ad group level.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CAMPAIGN = 2
        AD_GROUP = 3
class BiddingErrorEnum(object):
    """Container class for the BiddingError enum."""

    class BiddingError(enum.IntEnum):
        """
        Enum describing possible bidding errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          BIDDING_STRATEGY_TRANSITION_NOT_ALLOWED (int): Cannot transition to new bidding strategy.
          CANNOT_ATTACH_BIDDING_STRATEGY_TO_CAMPAIGN (int): Cannot attach bidding strategy to campaign.
          INVALID_ANONYMOUS_BIDDING_STRATEGY_TYPE (int): Bidding strategy is not supported or cannot be used as anonymous.
          INVALID_BIDDING_STRATEGY_TYPE (int): The type does not match the named strategy's type.
          INVALID_BID (int): The bid is invalid.
          BIDDING_STRATEGY_NOT_AVAILABLE_FOR_ACCOUNT_TYPE (int): Bidding strategy is not available for the account type.
          CONVERSION_TRACKING_NOT_ENABLED (int): Conversion tracking is not enabled for the campaign for VBB transition.
          NOT_ENOUGH_CONVERSIONS (int): Not enough conversions tracked for VBB transitions.
          CANNOT_CREATE_CAMPAIGN_WITH_BIDDING_STRATEGY (int): Campaign can not be created with given bidding strategy. It can be
          transitioned to the strategy, once eligible.
          CANNOT_TARGET_CONTENT_NETWORK_ONLY_WITH_CAMPAIGN_LEVEL_POP_BIDDING_STRATEGY (int): Cannot target content network only as campaign uses Page One Promoted
          bidding strategy.
          BIDDING_STRATEGY_NOT_SUPPORTED_WITH_AD_SCHEDULE (int): Budget Optimizer and Target Spend bidding strategies are not supported
          for campaigns with AdSchedule targeting.
          PAY_PER_CONVERSION_NOT_AVAILABLE_FOR_CUSTOMER (int): Pay per conversion is not available to all the customer, only few
          whitelisted customers can use this.
          PAY_PER_CONVERSION_NOT_ALLOWED_WITH_TARGET_CPA (int): Pay per conversion is not allowed with Target CPA.
          BIDDING_STRATEGY_NOT_ALLOWED_FOR_SEARCH_ONLY_CAMPAIGNS (int): Cannot set bidding strategy to Manual CPM for search network only
          campaigns.
          BIDDING_STRATEGY_NOT_SUPPORTED_IN_DRAFTS_OR_EXPERIMENTS (int): The bidding strategy is not supported for use in drafts or experiments.
          BIDDING_STRATEGY_TYPE_DOES_NOT_SUPPORT_PRODUCT_TYPE_ADGROUP_CRITERION (int): Bidding strategy type does not support product type ad group criterion.
          BID_TOO_SMALL (int): Bid amount is too small.
          BID_TOO_BIG (int): Bid amount is too big.
          BID_TOO_MANY_FRACTIONAL_DIGITS (int): Bid has too many fractional digit precision.
          INVALID_DOMAIN_NAME (int): Invalid domain name specified.
          NOT_COMPATIBLE_WITH_PAYMENT_MODE (int): The field is not compatible with payment mode.
          NOT_COMPATIBLE_WITH_BUDGET_TYPE (int): Bidding strategy is incompatible with the budget type.
        """
        # NOTE: values are non-contiguous; they mirror the server-side proto
        # enum and must not be renumbered.
        UNSPECIFIED = 0
        UNKNOWN = 1
        BIDDING_STRATEGY_TRANSITION_NOT_ALLOWED = 2
        CANNOT_ATTACH_BIDDING_STRATEGY_TO_CAMPAIGN = 7
        INVALID_ANONYMOUS_BIDDING_STRATEGY_TYPE = 10
        INVALID_BIDDING_STRATEGY_TYPE = 14
        INVALID_BID = 17
        BIDDING_STRATEGY_NOT_AVAILABLE_FOR_ACCOUNT_TYPE = 18
        CONVERSION_TRACKING_NOT_ENABLED = 19
        NOT_ENOUGH_CONVERSIONS = 20
        CANNOT_CREATE_CAMPAIGN_WITH_BIDDING_STRATEGY = 21
        CANNOT_TARGET_CONTENT_NETWORK_ONLY_WITH_CAMPAIGN_LEVEL_POP_BIDDING_STRATEGY = 23
        BIDDING_STRATEGY_NOT_SUPPORTED_WITH_AD_SCHEDULE = 24
        PAY_PER_CONVERSION_NOT_AVAILABLE_FOR_CUSTOMER = 25
        PAY_PER_CONVERSION_NOT_ALLOWED_WITH_TARGET_CPA = 26
        BIDDING_STRATEGY_NOT_ALLOWED_FOR_SEARCH_ONLY_CAMPAIGNS = 27
        BIDDING_STRATEGY_NOT_SUPPORTED_IN_DRAFTS_OR_EXPERIMENTS = 28
        BIDDING_STRATEGY_TYPE_DOES_NOT_SUPPORT_PRODUCT_TYPE_ADGROUP_CRITERION = 29
        BID_TOO_SMALL = 30
        BID_TOO_BIG = 31
        BID_TOO_MANY_FRACTIONAL_DIGITS = 32
        INVALID_DOMAIN_NAME = 33
        NOT_COMPATIBLE_WITH_PAYMENT_MODE = 34
        NOT_COMPATIBLE_WITH_BUDGET_TYPE = 35
class BiddingSourceEnum(object):
    """Container class for the BiddingSource enum."""

    class BiddingSource(enum.IntEnum):
        """
        Indicates where a bid or target is defined. For example, an ad group
        criterion may define a cpc bid directly, or it can inherit its cpc bid from
        the ad group.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          CAMPAIGN_BIDDING_STRATEGY (int): Effective bid or target is inherited from campaign bidding strategy.
          AD_GROUP (int): The bid or target is defined on the ad group.
          AD_GROUP_CRITERION (int): The bid or target is defined on the ad group criterion.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CAMPAIGN_BIDDING_STRATEGY = 5
        AD_GROUP = 6
        AD_GROUP_CRITERION = 7
class BiddingStrategyErrorEnum(object):
    """Container class for the BiddingStrategyError enum."""

    class BiddingStrategyError(enum.IntEnum):
        """
        Enum describing possible bidding strategy errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          DUPLICATE_NAME (int): Each bidding strategy must have a unique name.
          CANNOT_CHANGE_BIDDING_STRATEGY_TYPE (int): Bidding strategy type is immutable.
          CANNOT_REMOVE_ASSOCIATED_STRATEGY (int): Only bidding strategies not linked to campaigns, adgroups or adgroup
          criteria can be removed.
          BIDDING_STRATEGY_NOT_SUPPORTED (int): The specified bidding strategy is not supported.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        DUPLICATE_NAME = 2
        CANNOT_CHANGE_BIDDING_STRATEGY_TYPE = 3
        CANNOT_REMOVE_ASSOCIATED_STRATEGY = 4
        BIDDING_STRATEGY_NOT_SUPPORTED = 5
class BiddingStrategyStatusEnum(object):
    """Container class for the BiddingStrategyStatus enum."""

    class BiddingStrategyStatus(enum.IntEnum):
        """
        The possible statuses of a BiddingStrategy.

        Attributes:
          UNSPECIFIED (int): No value has been specified.
          UNKNOWN (int): The received value is not known in this version.
          This is a response-only value.
          ENABLED (int): The bidding strategy is enabled.
          REMOVED (int): The bidding strategy is removed.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENABLED = 2
        # NOTE: 3 is deliberately skipped in the upstream proto definition.
        REMOVED = 4
class BiddingStrategyTypeEnum(object):
    """Container class for the BiddingStrategyType enum."""

    class BiddingStrategyType(enum.IntEnum):
        """
        Enum describing possible bidding strategy types.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          ENHANCED_CPC (int): Enhanced CPC is a bidding strategy that raises bids for clicks
          that seem more likely to lead to a conversion and lowers
          them for clicks where they seem less likely.
          MANUAL_CPC (int): Manual click based bidding where user pays per click.
          MANUAL_CPM (int): Manual impression based bidding
          where user pays per thousand impressions.
          MANUAL_CPV (int): A bidding strategy that pays a configurable amount per video view.
          MAXIMIZE_CONVERSIONS (int): A bidding strategy that automatically maximizes number of conversions
          given a daily budget.
          MAXIMIZE_CONVERSION_VALUE (int): An automated bidding strategy that automatically sets bids to maximize
          revenue while spending your budget.
          PAGE_ONE_PROMOTED (int): Page-One Promoted bidding scheme, which sets max cpc bids to
          target impressions on page one or page one promoted slots on google.com.
          PERCENT_CPC (int): Percent Cpc is bidding strategy where bids are a fraction of the
          advertised price for some good or service.
          TARGET_CPA (int): Target CPA is an automated bid strategy that sets bids
          to help get as many conversions as possible
          at the target cost-per-acquisition (CPA) you set.
          TARGET_CPM (int): Target CPM is an automated bid strategy that sets bids to help get
          as many impressions as possible at the target cost per one thousand
          impressions (CPM) you set.
          TARGET_IMPRESSION_SHARE (int): An automated bidding strategy that sets bids so that a certain percentage
          of search ads are shown at the top of the first page (or other targeted
          location).
          TARGET_OUTRANK_SHARE (int): Target Outrank Share is an automated bidding strategy that sets bids
          based on the target fraction of auctions where the advertiser
          should outrank a specific competitor.
          TARGET_ROAS (int): Target ROAS is an automated bidding strategy
          that helps you maximize revenue while averaging
          a specific target Return On Average Spend (ROAS).
          TARGET_SPEND (int): Target Spend is an automated bid strategy that sets your bids
          to help get as many clicks as possible within your budget.
        """
        # NOTE: members are listed alphabetically, not by value, matching the
        # upstream proto; do not renumber or reorder.
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENHANCED_CPC = 2
        MANUAL_CPC = 3
        MANUAL_CPM = 4
        MANUAL_CPV = 13
        MAXIMIZE_CONVERSIONS = 10
        MAXIMIZE_CONVERSION_VALUE = 11
        PAGE_ONE_PROMOTED = 5
        PERCENT_CPC = 12
        TARGET_CPA = 6
        TARGET_CPM = 14
        TARGET_IMPRESSION_SHARE = 15
        TARGET_OUTRANK_SHARE = 7
        TARGET_ROAS = 8
        TARGET_SPEND = 9
class BillingSetupErrorEnum(object):
    """Container class for the BillingSetupError enum."""

    class BillingSetupError(enum.IntEnum):
        """
        Enum describing possible billing setup errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          CANNOT_USE_EXISTING_AND_NEW_ACCOUNT (int): Cannot use both an existing Payments account and a new Payments account
          when setting up billing.
          CANNOT_REMOVE_STARTED_BILLING_SETUP (int): Cannot cancel an APPROVED billing setup whose start time has passed.
          CANNOT_CHANGE_BILLING_TO_SAME_PAYMENTS_ACCOUNT (int): Cannot perform a Change of Bill-To (CBT) to the same Payments account.
          BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_STATUS (int): Billing Setups can only be used by customers with ENABLED or DRAFT
          status.
          INVALID_PAYMENTS_ACCOUNT (int): Billing Setups must either include a correctly formatted existing
          Payments account id, or a non-empty new Payments account name.
          BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_CATEGORY (int): Only billable and third-party customers can create billing setups.
          INVALID_START_TIME_TYPE (int): Billing Setup creations can only use NOW for start time type.
          THIRD_PARTY_ALREADY_HAS_BILLING (int): Billing Setups can only be created for a third-party customer if they do
          not already have a setup.
          BILLING_SETUP_IN_PROGRESS (int): Billing Setups cannot be created if there is already a pending billing in
          progress, i.e. a billing known to Payments.
          NO_SIGNUP_PERMISSION (int): Billing Setups can only be created by customers who have permission to
          setup billings. Users can contact a representative for help setting up
          permissions.
          CHANGE_OF_BILL_TO_IN_PROGRESS (int): Billing Setups cannot be created if there is already a future-approved
          billing.
          PAYMENTS_PROFILE_NOT_FOUND (int): Billing Setup creation failed because Payments could not find the
          requested Payments profile.
          PAYMENTS_ACCOUNT_NOT_FOUND (int): Billing Setup creation failed because Payments could not find the
          requested Payments account.
          PAYMENTS_PROFILE_INELIGIBLE (int): Billing Setup creation failed because Payments considers requested
          Payments profile ineligible.
          PAYMENTS_ACCOUNT_INELIGIBLE (int): Billing Setup creation failed because Payments considers requested
          Payments account ineligible.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CANNOT_USE_EXISTING_AND_NEW_ACCOUNT = 2
        CANNOT_REMOVE_STARTED_BILLING_SETUP = 3
        CANNOT_CHANGE_BILLING_TO_SAME_PAYMENTS_ACCOUNT = 4
        BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_STATUS = 5
        INVALID_PAYMENTS_ACCOUNT = 6
        BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_CATEGORY = 7
        INVALID_START_TIME_TYPE = 8
        THIRD_PARTY_ALREADY_HAS_BILLING = 9
        BILLING_SETUP_IN_PROGRESS = 10
        NO_SIGNUP_PERMISSION = 11
        CHANGE_OF_BILL_TO_IN_PROGRESS = 12
        PAYMENTS_PROFILE_NOT_FOUND = 13
        PAYMENTS_ACCOUNT_NOT_FOUND = 14
        PAYMENTS_PROFILE_INELIGIBLE = 15
        PAYMENTS_ACCOUNT_INELIGIBLE = 16
class BillingSetupStatusEnum(object):
    """Container class for the BillingSetupStatus enum."""

    class BillingSetupStatus(enum.IntEnum):
        """
        The possible statuses of a BillingSetup.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          PENDING (int): The billing setup is pending approval.
          APPROVED_HELD (int): The billing setup has been approved but the corresponding first budget
          has not. This can only occur for billing setups configured for monthly
          invoicing.
          APPROVED (int): The billing setup has been approved.
          CANCELLED (int): The billing setup was cancelled by the user prior to approval.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        PENDING = 2
        APPROVED_HELD = 3
        APPROVED = 4
        CANCELLED = 5
class BrandSafetySuitabilityEnum(object):
    """Container class for the BrandSafetySuitability enum."""

    class BrandSafetySuitability(enum.IntEnum):
        """
        3-Tier brand safety suitability control.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          EXPANDED_INVENTORY (int): This option lets you show ads across all inventory on YouTube and video
          partners that meet our standards for monetization. This option may be an
          appropriate choice for brands that want maximum access to the full
          breadth of videos eligible for ads, including, for example, videos that
          have strong profanity in the context of comedy or a documentary, or
          excessive violence as featured in video games.
          STANDARD_INVENTORY (int): This option lets you show ads across a wide range of content that's
          appropriate for most brands, such as popular music videos, documentaries,
          and movie trailers. The content you can show ads on is based on YouTube's
          advertiser-friendly content guidelines that take into account, for
          example, the strength or frequency of profanity, or the appropriateness
          of subject matter like sensitive events. Ads won't show, for example, on
          content with repeated strong profanity, strong sexual content, or graphic
          violence.
          LIMITED_INVENTORY (int): This option lets you show ads on a reduced range of content that's
          appropriate for brands with particularly strict guidelines around
          inappropriate language and sexual suggestiveness; above and beyond what
          YouTube's advertiser-friendly content guidelines address. The videos
          accessible in this sensitive category meet heightened requirements,
          especially for inappropriate language and sexual suggestiveness. For
          example, your ads will be excluded from showing on some of YouTube's most
          popular music videos and other pop culture content across YouTube and
          Google video partners.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        EXPANDED_INVENTORY = 2
        STANDARD_INVENTORY = 3
        LIMITED_INVENTORY = 4
class BudgetDeliveryMethodEnum(object):
    """Container class for the BudgetDeliveryMethod enum."""

    class BudgetDeliveryMethod(enum.IntEnum):
        """
        Possible delivery methods of a Budget.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          STANDARD (int): The budget server will throttle serving evenly across
          the entire time period.
          ACCELERATED (int): The budget server will not throttle serving,
          and ads will serve as fast as possible.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        STANDARD = 2
        ACCELERATED = 3
class BudgetPeriodEnum(object):
    """Container class for the BudgetPeriod enum."""

    class BudgetPeriod(enum.IntEnum):
        """
        Possible period of a Budget.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          DAILY (int): Daily budget.
          CUSTOM (int): Custom budget.
          FIXED_DAILY (int): Fixed daily budget.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        DAILY = 2
        CUSTOM = 3
        FIXED_DAILY = 4
class BudgetStatusEnum(object):
    """Container class for the BudgetStatus enum."""

    class BudgetStatus(enum.IntEnum):
        """
        Possible statuses of a Budget.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          ENABLED (int): Budget is enabled.
          REMOVED (int): Budget is removed.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENABLED = 2
        REMOVED = 3
class BudgetTypeEnum(object):
    """Container class for the BudgetType enum."""

    class BudgetType(enum.IntEnum):
        """
        Possible Budget types.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          STANDARD (int): Budget type for standard Google Ads usage.
          HOTEL_ADS_COMMISSION (int): Budget type for Hotels Ads commission program.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        STANDARD = 2
        HOTEL_ADS_COMMISSION = 3
class CallConversionReportingStateEnum(object):
    """Container class for the CallConversionReportingState enum."""

    class CallConversionReportingState(enum.IntEnum):
        """
        Possible data types for a call conversion action state.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          DISABLED (int): Call conversion action is disabled.
          USE_ACCOUNT_LEVEL_CALL_CONVERSION_ACTION (int): Call conversion action will use call conversion type set at the
          account level.
          USE_RESOURCE_LEVEL_CALL_CONVERSION_ACTION (int): Call conversion action will use call conversion type set at the resource
          (call only ads/call extensions) level.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        DISABLED = 2
        USE_ACCOUNT_LEVEL_CALL_CONVERSION_ACTION = 3
        USE_RESOURCE_LEVEL_CALL_CONVERSION_ACTION = 4
class CallPlaceholderFieldEnum(object):
    """Container class for the CallPlaceholderField enum."""

    class CallPlaceholderField(enum.IntEnum):
        """
        Possible values for Call placeholder fields.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          PHONE_NUMBER (int): Data Type: STRING. The advertiser's phone number to append to the ad.
          COUNTRY_CODE (int): Data Type: STRING. Uppercase two-letter country code of the advertiser's
          phone number.
          TRACKED (int): Data Type: BOOLEAN. Indicates whether call tracking is enabled. Default:
          true.
          CONVERSION_TYPE_ID (int): Data Type: INT64. The ID of an AdCallMetricsConversion object. This
          object contains the phoneCallDuration field, which is the minimum duration
          (in seconds) of a call to be considered a conversion.
          CONVERSION_REPORTING_STATE (int): Data Type: STRING. Indicates whether this call extension uses its own
          call conversion setting or follows the account level setting. Valid
          values are: USE\_ACCOUNT\_LEVEL\_CALL\_CONVERSION\_ACTION and
          USE\_RESOURCE\_LEVEL\_CALL\_CONVERSION\_ACTION.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        PHONE_NUMBER = 2
        COUNTRY_CODE = 3
        TRACKED = 4
        CONVERSION_TYPE_ID = 5
        CONVERSION_REPORTING_STATE = 6
class CalloutPlaceholderFieldEnum(object):
    """Container class for the CalloutPlaceholderField enum."""

    class CalloutPlaceholderField(enum.IntEnum):
        """
        Possible values for Callout placeholder fields.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          CALLOUT_TEXT (int): Data Type: STRING. Callout text.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CALLOUT_TEXT = 2
class CampaignBudgetErrorEnum(object):
    """Container class for the CampaignBudgetError enum."""

    class CampaignBudgetError(enum.IntEnum):
        """
        Enum describing possible campaign budget errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          CAMPAIGN_BUDGET_CANNOT_BE_SHARED (int): The campaign budget cannot be shared.
          CAMPAIGN_BUDGET_REMOVED (int): The requested campaign budget no longer exists.
          CAMPAIGN_BUDGET_IN_USE (int): The campaign budget is associated with at least one campaign, and so the
          campaign budget cannot be removed.
          CAMPAIGN_BUDGET_PERIOD_NOT_AVAILABLE (int): Customer is not whitelisted for this campaign budget period.
          CANNOT_MODIFY_FIELD_OF_IMPLICITLY_SHARED_CAMPAIGN_BUDGET (int): This field is not mutable on implicitly shared campaign budgets
          CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_IMPLICITLY_SHARED (int): Cannot change explicitly shared campaign budgets back to implicitly
          shared ones.
          CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_EXPLICITLY_SHARED_WITHOUT_NAME (int): An implicit campaign budget without a name cannot be changed to
          explicitly shared campaign budget.
          CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_EXPLICITLY_SHARED (int): Cannot change an implicitly shared campaign budget to an explicitly
          shared one.
          CANNOT_USE_IMPLICITLY_SHARED_CAMPAIGN_BUDGET_WITH_MULTIPLE_CAMPAIGNS (int): Only explicitly shared campaign budgets can be used with multiple
          campaigns.
          DUPLICATE_NAME (int): A campaign budget with this name already exists.
          MONEY_AMOUNT_IN_WRONG_CURRENCY (int): A money amount was not in the expected currency.
          MONEY_AMOUNT_LESS_THAN_CURRENCY_MINIMUM_CPC (int): A money amount was less than the minimum CPC for currency.
          MONEY_AMOUNT_TOO_LARGE (int): A money amount was greater than the maximum allowed.
          NEGATIVE_MONEY_AMOUNT (int): A money amount was negative.
          NON_MULTIPLE_OF_MINIMUM_CURRENCY_UNIT (int): A money amount was not a multiple of a minimum unit.
        """
        # NOTE: values are non-contiguous; they mirror the server-side proto
        # enum and must not be renumbered.
        UNSPECIFIED = 0
        UNKNOWN = 1
        CAMPAIGN_BUDGET_CANNOT_BE_SHARED = 17
        CAMPAIGN_BUDGET_REMOVED = 2
        CAMPAIGN_BUDGET_IN_USE = 3
        CAMPAIGN_BUDGET_PERIOD_NOT_AVAILABLE = 4
        CANNOT_MODIFY_FIELD_OF_IMPLICITLY_SHARED_CAMPAIGN_BUDGET = 6
        CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_IMPLICITLY_SHARED = 7
        CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_EXPLICITLY_SHARED_WITHOUT_NAME = 8
        CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_EXPLICITLY_SHARED = 9
        CANNOT_USE_IMPLICITLY_SHARED_CAMPAIGN_BUDGET_WITH_MULTIPLE_CAMPAIGNS = 10
        DUPLICATE_NAME = 11
        MONEY_AMOUNT_IN_WRONG_CURRENCY = 12
        MONEY_AMOUNT_LESS_THAN_CURRENCY_MINIMUM_CPC = 13
        MONEY_AMOUNT_TOO_LARGE = 14
        NEGATIVE_MONEY_AMOUNT = 15
        NON_MULTIPLE_OF_MINIMUM_CURRENCY_UNIT = 16
class CampaignCriterionErrorEnum(object):
    """Container class for the CampaignCriterionError enum."""

    class CampaignCriterionError(enum.IntEnum):
        """
        Enum describing possible campaign criterion errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          CONCRETE_TYPE_REQUIRED (int): Concrete type of criterion (keyword v.s. placement) is required for
          CREATE and UPDATE operations.
          INVALID_PLACEMENT_URL (int): Invalid placement URL.
          CANNOT_EXCLUDE_CRITERIA_TYPE (int): Criteria type can not be excluded for the campaign by the customer. like
          AOL account type cannot target site type criteria
          CANNOT_SET_STATUS_FOR_CRITERIA_TYPE (int): Cannot set the campaign criterion status for this criteria type.
          CANNOT_SET_STATUS_FOR_EXCLUDED_CRITERIA (int): Cannot set the campaign criterion status for an excluded criteria.
          CANNOT_TARGET_AND_EXCLUDE (int): Cannot target and exclude the same criterion.
          TOO_MANY_OPERATIONS (int): The mutate contained too many operations.
          OPERATOR_NOT_SUPPORTED_FOR_CRITERION_TYPE (int): This operator cannot be applied to a criterion of this type.
          SHOPPING_CAMPAIGN_SALES_COUNTRY_NOT_SUPPORTED_FOR_SALES_CHANNEL (int): The Shopping campaign sales country is not supported for
          ProductSalesChannel targeting.
          CANNOT_ADD_EXISTING_FIELD (int): The existing field can't be updated with CREATE operation. It can be
          updated with UPDATE operation only.
          CANNOT_UPDATE_NEGATIVE_CRITERION (int): Negative criteria are immutable, so updates are not allowed.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CONCRETE_TYPE_REQUIRED = 2
        INVALID_PLACEMENT_URL = 3
        CANNOT_EXCLUDE_CRITERIA_TYPE = 4
        CANNOT_SET_STATUS_FOR_CRITERIA_TYPE = 5
        CANNOT_SET_STATUS_FOR_EXCLUDED_CRITERIA = 6
        CANNOT_TARGET_AND_EXCLUDE = 7
        TOO_MANY_OPERATIONS = 8
        OPERATOR_NOT_SUPPORTED_FOR_CRITERION_TYPE = 9
        SHOPPING_CAMPAIGN_SALES_COUNTRY_NOT_SUPPORTED_FOR_SALES_CHANNEL = 10
        CANNOT_ADD_EXISTING_FIELD = 11
        CANNOT_UPDATE_NEGATIVE_CRITERION = 12
class CampaignErrorEnum(object):
    """Container wrapper exposing the CampaignError enum (generated code)."""

    class CampaignError(enum.IntEnum):
        """
        Enum describing possible campaign errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          CANNOT_TARGET_CONTENT_NETWORK (int): Cannot target content network.
          CANNOT_TARGET_SEARCH_NETWORK (int): Cannot target search network.
          CANNOT_TARGET_SEARCH_NETWORK_WITHOUT_GOOGLE_SEARCH (int): Cannot cover search network without google search network.
          CANNOT_TARGET_GOOGLE_SEARCH_FOR_CPM_CAMPAIGN (int): Cannot target Google Search network for a CPM campaign.
          CAMPAIGN_MUST_TARGET_AT_LEAST_ONE_NETWORK (int): Must target at least one network.
          CANNOT_TARGET_PARTNER_SEARCH_NETWORK (int): Only some Google partners are allowed to target partner search network.
          CANNOT_TARGET_CONTENT_NETWORK_ONLY_WITH_CRITERIA_LEVEL_BIDDING_STRATEGY (int): Cannot target content network only as campaign has criteria-level bidding
          strategy.
          CAMPAIGN_DURATION_MUST_CONTAIN_ALL_RUNNABLE_TRIALS (int): Cannot modify the start or end date such that the campaign duration would
          not contain the durations of all runnable trials.
          CANNOT_MODIFY_FOR_TRIAL_CAMPAIGN (int): Cannot modify dates, budget or campaign name of a trial campaign.
          DUPLICATE_CAMPAIGN_NAME (int): Trying to modify the name of an active or paused campaign, where the name
          is already assigned to another active or paused campaign.
          INCOMPATIBLE_CAMPAIGN_FIELD (int): Two fields are in conflicting modes.
          INVALID_CAMPAIGN_NAME (int): Campaign name cannot be used.
          INVALID_AD_SERVING_OPTIMIZATION_STATUS (int): Given status is invalid.
          INVALID_TRACKING_URL (int): Error in the campaign level tracking url.
          CANNOT_SET_BOTH_TRACKING_URL_TEMPLATE_AND_TRACKING_SETTING (int): Cannot set both tracking url template and tracking setting. An user has
          to clear legacy tracking setting in order to add tracking url template.
          MAX_IMPRESSIONS_NOT_IN_RANGE (int): The maximum number of impressions for Frequency Cap should be an integer
          greater than 0.
          TIME_UNIT_NOT_SUPPORTED (int): Only the Day, Week and Month time units are supported.
          INVALID_OPERATION_IF_SERVING_STATUS_HAS_ENDED (int): Operation not allowed on a campaign whose serving status has ended
          BUDGET_CANNOT_BE_SHARED (int): This budget is exclusively linked to a Campaign that is using experiments
          so it cannot be shared.
          CAMPAIGN_CANNOT_USE_SHARED_BUDGET (int): Campaigns using experiments cannot use a shared budget.
          CANNOT_CHANGE_BUDGET_ON_CAMPAIGN_WITH_TRIALS (int): A different budget cannot be assigned to a campaign when there are
          running or scheduled trials.
          CAMPAIGN_LABEL_DOES_NOT_EXIST (int): No link found between the campaign and the label.
          CAMPAIGN_LABEL_ALREADY_EXISTS (int): The label has already been attached to the campaign.
          MISSING_SHOPPING_SETTING (int): A ShoppingSetting was not found when creating a shopping campaign.
          INVALID_SHOPPING_SALES_COUNTRY (int): The country in shopping setting is not an allowed country.
          MISSING_UNIVERSAL_APP_CAMPAIGN_SETTING (int): A Campaign with channel sub type UNIVERSAL\_APP\_CAMPAIGN must have a
          UniversalAppCampaignSetting specified.
          ADVERTISING_CHANNEL_TYPE_NOT_AVAILABLE_FOR_ACCOUNT_TYPE (int): The requested channel type is not available according to the customer's
          account setting.
          INVALID_ADVERTISING_CHANNEL_SUB_TYPE (int): The AdvertisingChannelSubType is not a valid subtype of the primary
          channel type.
          AT_LEAST_ONE_CONVERSION_MUST_BE_SELECTED (int): At least one conversion must be selected.
          CANNOT_SET_AD_ROTATION_MODE (int): Setting ad rotation mode for a campaign is not allowed. Ad rotation mode
          at campaign is deprecated.
          CANNOT_MODIFY_START_DATE_IF_ALREADY_STARTED (int): Trying to change start date on a campaign that has started.
          CANNOT_SET_DATE_TO_PAST (int): Trying to modify a date into the past.
          MISSING_HOTEL_CUSTOMER_LINK (int): Hotel center id in the hotel setting does not match any customer links.
          INVALID_HOTEL_CUSTOMER_LINK (int): Hotel center id in the hotel setting must match an active customer link.
          MISSING_HOTEL_SETTING (int): Hotel setting was not found when creating a hotel ads campaign.
          CANNOT_USE_SHARED_CAMPAIGN_BUDGET_WHILE_PART_OF_CAMPAIGN_GROUP (int): A Campaign cannot use shared campaign budgets and be part of a campaign
          group.
          APP_NOT_FOUND (int): The app ID was not found.
        """
        # NOTE(review): the non-contiguous values (2, 28, 29 skipped) appear
        # intentional and API-defined; do not renumber to close the gaps.
        UNSPECIFIED = 0
        UNKNOWN = 1
        CANNOT_TARGET_CONTENT_NETWORK = 3
        CANNOT_TARGET_SEARCH_NETWORK = 4
        CANNOT_TARGET_SEARCH_NETWORK_WITHOUT_GOOGLE_SEARCH = 5
        CANNOT_TARGET_GOOGLE_SEARCH_FOR_CPM_CAMPAIGN = 6
        CAMPAIGN_MUST_TARGET_AT_LEAST_ONE_NETWORK = 7
        CANNOT_TARGET_PARTNER_SEARCH_NETWORK = 8
        CANNOT_TARGET_CONTENT_NETWORK_ONLY_WITH_CRITERIA_LEVEL_BIDDING_STRATEGY = 9
        CAMPAIGN_DURATION_MUST_CONTAIN_ALL_RUNNABLE_TRIALS = 10
        CANNOT_MODIFY_FOR_TRIAL_CAMPAIGN = 11
        DUPLICATE_CAMPAIGN_NAME = 12
        INCOMPATIBLE_CAMPAIGN_FIELD = 13
        INVALID_CAMPAIGN_NAME = 14
        INVALID_AD_SERVING_OPTIMIZATION_STATUS = 15
        INVALID_TRACKING_URL = 16
        CANNOT_SET_BOTH_TRACKING_URL_TEMPLATE_AND_TRACKING_SETTING = 17
        MAX_IMPRESSIONS_NOT_IN_RANGE = 18
        TIME_UNIT_NOT_SUPPORTED = 19
        INVALID_OPERATION_IF_SERVING_STATUS_HAS_ENDED = 20
        BUDGET_CANNOT_BE_SHARED = 21
        CAMPAIGN_CANNOT_USE_SHARED_BUDGET = 22
        CANNOT_CHANGE_BUDGET_ON_CAMPAIGN_WITH_TRIALS = 23
        CAMPAIGN_LABEL_DOES_NOT_EXIST = 24
        CAMPAIGN_LABEL_ALREADY_EXISTS = 25
        MISSING_SHOPPING_SETTING = 26
        INVALID_SHOPPING_SALES_COUNTRY = 27
        MISSING_UNIVERSAL_APP_CAMPAIGN_SETTING = 30
        ADVERTISING_CHANNEL_TYPE_NOT_AVAILABLE_FOR_ACCOUNT_TYPE = 31
        INVALID_ADVERTISING_CHANNEL_SUB_TYPE = 32
        AT_LEAST_ONE_CONVERSION_MUST_BE_SELECTED = 33
        CANNOT_SET_AD_ROTATION_MODE = 34
        CANNOT_MODIFY_START_DATE_IF_ALREADY_STARTED = 35
        CANNOT_SET_DATE_TO_PAST = 36
        MISSING_HOTEL_CUSTOMER_LINK = 37
        INVALID_HOTEL_CUSTOMER_LINK = 38
        MISSING_HOTEL_SETTING = 39
        CANNOT_USE_SHARED_CAMPAIGN_BUDGET_WHILE_PART_OF_CAMPAIGN_GROUP = 40
        APP_NOT_FOUND = 41
class CampaignFeedErrorEnum(object):
    """Container wrapper exposing the CampaignFeedError enum (generated code)."""

    class CampaignFeedError(enum.IntEnum):
        """
        Enum describing possible campaign feed errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE (int): An active feed already exists for this campaign and placeholder type.
          CANNOT_CREATE_FOR_REMOVED_FEED (int): The specified feed is removed.
          CANNOT_CREATE_ALREADY_EXISTING_CAMPAIGN_FEED (int): The CampaignFeed already exists. UPDATE should be used to modify the
          existing CampaignFeed.
          CANNOT_MODIFY_REMOVED_CAMPAIGN_FEED (int): Cannot update removed campaign feed.
          INVALID_PLACEHOLDER_TYPE (int): Invalid placeholder type.
          MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE (int): Feed mapping for this placeholder type does not exist.
        """
        # NOTE(review): value 3 is skipped; the gap appears API-defined — do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 2
        CANNOT_CREATE_FOR_REMOVED_FEED = 4
        CANNOT_CREATE_ALREADY_EXISTING_CAMPAIGN_FEED = 5
        CANNOT_MODIFY_REMOVED_CAMPAIGN_FEED = 6
        INVALID_PLACEHOLDER_TYPE = 7
        MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE = 8
class CampaignServingStatusEnum(object):
    """Container wrapper exposing the CampaignServingStatus enum (generated code)."""

    class CampaignServingStatus(enum.IntEnum):
        """
        Possible serving statuses of a campaign.

        Attributes:
          UNSPECIFIED (int): No value has been specified.
          UNKNOWN (int): The received value is not known in this version.
          This is a response-only value.
          SERVING (int): Serving.
          NONE (int): None.
          ENDED (int): Ended.
          PENDING (int): Pending.
          SUSPENDED (int): Suspended.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        SERVING = 2
        NONE = 3
        ENDED = 4
        PENDING = 5
        SUSPENDED = 6
class CampaignSharedSetErrorEnum(object):
    """Container wrapper exposing the CampaignSharedSetError enum (generated code)."""

    class CampaignSharedSetError(enum.IntEnum):
        """
        Enum describing possible campaign shared set errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          SHARED_SET_ACCESS_DENIED (int): The shared set belongs to another customer and permission isn't granted.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        SHARED_SET_ACCESS_DENIED = 2
class CampaignSharedSetStatusEnum(object):
    """Container wrapper exposing the CampaignSharedSetStatus enum (generated code)."""

    class CampaignSharedSetStatus(enum.IntEnum):
        """
        Enum listing the possible campaign shared set statuses.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          ENABLED (int): The campaign shared set is enabled.
          REMOVED (int): The campaign shared set is removed and can no longer be used.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENABLED = 2
        REMOVED = 3
class CampaignStatusEnum(object):
    """Container wrapper exposing the CampaignStatus enum (generated code)."""

    class CampaignStatus(enum.IntEnum):
        """
        Possible statuses of a campaign.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          ENABLED (int): Campaign is currently serving ads depending on budget information.
          PAUSED (int): Campaign has been paused by the user.
          REMOVED (int): Campaign has been removed.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENABLED = 2
        PAUSED = 3
        REMOVED = 4
class ChangeStatusErrorEnum(object):
    """Container wrapper exposing the ChangeStatusError enum (generated code)."""

    class ChangeStatusError(enum.IntEnum):
        """
        Enum describing possible change status errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          START_DATE_TOO_OLD (int): The requested start date is too old.
        """
        # NOTE(review): value 2 is skipped; the gap appears API-defined — do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        START_DATE_TOO_OLD = 3
class ChangeStatusOperationEnum(object):
    """Container wrapper exposing the ChangeStatusOperation enum (generated code)."""

    class ChangeStatusOperation(enum.IntEnum):
        """
        Status of the changed resource

        Attributes:
          UNSPECIFIED (int): No value has been specified.
          UNKNOWN (int): Used for return value only. Represents an unclassified resource unknown
          in this version.
          ADDED (int): The resource was created.
          CHANGED (int): The resource was modified.
          REMOVED (int): The resource was removed.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        ADDED = 2
        CHANGED = 3
        REMOVED = 4
class ChangeStatusResourceTypeEnum(object):
    """Container wrapper exposing the ChangeStatusResourceType enum (generated code)."""

    class ChangeStatusResourceType(enum.IntEnum):
        """
        Enum listing the resource types support by the ChangeStatus resource.

        Attributes:
          UNSPECIFIED (int): No value has been specified.
          UNKNOWN (int): Used for return value only. Represents an unclassified resource unknown
          in this version.
          AD_GROUP (int): An AdGroup resource change.
          AD_GROUP_AD (int): An AdGroupAd resource change.
          AD_GROUP_CRITERION (int): An AdGroupCriterion resource change.
          CAMPAIGN (int): A Campaign resource change.
          CAMPAIGN_CRITERION (int): A CampaignCriterion resource change.
          FEED (int): A Feed resource change.
          FEED_ITEM (int): A FeedItem resource change.
          AD_GROUP_FEED (int): An AdGroupFeed resource change.
          CAMPAIGN_FEED (int): A CampaignFeed resource change.
          AD_GROUP_BID_MODIFIER (int): An AdGroupBidModifier resource change.
        """
        # NOTE(review): values 2 and 8 are skipped; the gaps appear API-defined —
        # do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        AD_GROUP = 3
        AD_GROUP_AD = 4
        AD_GROUP_CRITERION = 5
        CAMPAIGN = 6
        CAMPAIGN_CRITERION = 7
        FEED = 9
        FEED_ITEM = 10
        AD_GROUP_FEED = 11
        CAMPAIGN_FEED = 12
        AD_GROUP_BID_MODIFIER = 13
class ClickTypeEnum(object):
    """Container wrapper exposing the ClickType enum (generated code)."""

    class ClickType(enum.IntEnum):
        """
        Enumerates Google Ads click types.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): The value is unknown in this version.
          APP_DEEPLINK (int): App engagement ad deep link.
          BREADCRUMBS (int): Breadcrumbs.
          BROADBAND_PLAN (int): Broadband Plan.
          CALL_TRACKING (int): Manually dialed phone calls.
          CALLS (int): Phone calls.
          CLICK_ON_ENGAGEMENT_AD (int): Click on engagement ad.
          GET_DIRECTIONS (int): Driving direction.
          LOCATION_EXPANSION (int): Get location details.
          LOCATION_FORMAT_CALL (int): Call.
          LOCATION_FORMAT_DIRECTIONS (int): Directions.
          LOCATION_FORMAT_IMAGE (int): Image(s).
          LOCATION_FORMAT_LANDING_PAGE (int): Go to landing page.
          LOCATION_FORMAT_MAP (int): Map.
          LOCATION_FORMAT_STORE_INFO (int): Go to store info.
          LOCATION_FORMAT_TEXT (int): Text.
          MOBILE_CALL_TRACKING (int): Mobile phone calls.
          OFFER_PRINTS (int): Print offer.
          OTHER (int): Other.
          PRODUCT_EXTENSION_CLICKS (int): Product plusbox offer.
          PRODUCT_LISTING_AD_CLICKS (int): Shopping - Product - Online.
          SITELINKS (int): Sitelink.
          STORE_LOCATOR (int): Show nearby locations.
          URL_CLICKS (int): Headline.
          VIDEO_APP_STORE_CLICKS (int): App store.
          VIDEO_CALL_TO_ACTION_CLICKS (int): Call-to-Action overlay.
          VIDEO_CARD_ACTION_HEADLINE_CLICKS (int): Cards.
          VIDEO_END_CAP_CLICKS (int): End cap.
          VIDEO_WEBSITE_CLICKS (int): Website.
          VISUAL_SITELINKS (int): Visual Sitelinks.
          WIRELESS_PLAN (int): Wireless Plan.
          PRODUCT_LISTING_AD_LOCAL (int): Shopping - Product - Local.
          PRODUCT_LISTING_AD_MULTICHANNEL_LOCAL (int): Shopping - Product - MultiChannel Local.
          PRODUCT_LISTING_AD_MULTICHANNEL_ONLINE (int): Shopping - Product - MultiChannel Online.
          PRODUCT_LISTING_ADS_COUPON (int): Shopping - Product - Coupon.
          PRODUCT_LISTING_AD_TRANSACTABLE (int): Shopping - Product - Sell on Google.
          PRODUCT_AD_APP_DEEPLINK (int): Shopping - Product - App engagement ad deep link.
          SHOWCASE_AD_CATEGORY_LINK (int): Shopping - Showcase - Category.
          SHOWCASE_AD_LOCAL_STOREFRONT_LINK (int): Shopping - Showcase - Local storefront.
          SHOWCASE_AD_ONLINE_PRODUCT_LINK (int): Shopping - Showcase - Online product.
          SHOWCASE_AD_LOCAL_PRODUCT_LINK (int): Shopping - Showcase - Local product.
          PROMOTION_EXTENSION (int): Promotion Extension.
          SWIPEABLE_GALLERY_AD_HEADLINE (int): Ad Headline.
          SWIPEABLE_GALLERY_AD_SWIPES (int): Swipes.
          SWIPEABLE_GALLERY_AD_SEE_MORE (int): See More.
          SWIPEABLE_GALLERY_AD_SITELINK_ONE (int): Sitelink 1.
          SWIPEABLE_GALLERY_AD_SITELINK_TWO (int): Sitelink 2.
          SWIPEABLE_GALLERY_AD_SITELINK_THREE (int): Sitelink 3.
          SWIPEABLE_GALLERY_AD_SITELINK_FOUR (int): Sitelink 4.
          SWIPEABLE_GALLERY_AD_SITELINK_FIVE (int): Sitelink 5.
          HOTEL_PRICE (int): Hotel price.
          PRICE_EXTENSION (int): Price Extension.
          HOTEL_BOOK_ON_GOOGLE_ROOM_SELECTION (int): Book on Google hotel room selection.
        """
        # NOTE(review): values 24 and 41 are skipped; the gaps appear API-defined —
        # do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        APP_DEEPLINK = 2
        BREADCRUMBS = 3
        BROADBAND_PLAN = 4
        CALL_TRACKING = 5
        CALLS = 6
        CLICK_ON_ENGAGEMENT_AD = 7
        GET_DIRECTIONS = 8
        LOCATION_EXPANSION = 9
        LOCATION_FORMAT_CALL = 10
        LOCATION_FORMAT_DIRECTIONS = 11
        LOCATION_FORMAT_IMAGE = 12
        LOCATION_FORMAT_LANDING_PAGE = 13
        LOCATION_FORMAT_MAP = 14
        LOCATION_FORMAT_STORE_INFO = 15
        LOCATION_FORMAT_TEXT = 16
        MOBILE_CALL_TRACKING = 17
        OFFER_PRINTS = 18
        OTHER = 19
        PRODUCT_EXTENSION_CLICKS = 20
        PRODUCT_LISTING_AD_CLICKS = 21
        SITELINKS = 22
        STORE_LOCATOR = 23
        URL_CLICKS = 25
        VIDEO_APP_STORE_CLICKS = 26
        VIDEO_CALL_TO_ACTION_CLICKS = 27
        VIDEO_CARD_ACTION_HEADLINE_CLICKS = 28
        VIDEO_END_CAP_CLICKS = 29
        VIDEO_WEBSITE_CLICKS = 30
        VISUAL_SITELINKS = 31
        WIRELESS_PLAN = 32
        PRODUCT_LISTING_AD_LOCAL = 33
        PRODUCT_LISTING_AD_MULTICHANNEL_LOCAL = 34
        PRODUCT_LISTING_AD_MULTICHANNEL_ONLINE = 35
        PRODUCT_LISTING_ADS_COUPON = 36
        PRODUCT_LISTING_AD_TRANSACTABLE = 37
        PRODUCT_AD_APP_DEEPLINK = 38
        SHOWCASE_AD_CATEGORY_LINK = 39
        SHOWCASE_AD_LOCAL_STOREFRONT_LINK = 40
        SHOWCASE_AD_ONLINE_PRODUCT_LINK = 42
        SHOWCASE_AD_LOCAL_PRODUCT_LINK = 43
        PROMOTION_EXTENSION = 44
        SWIPEABLE_GALLERY_AD_HEADLINE = 45
        SWIPEABLE_GALLERY_AD_SWIPES = 46
        SWIPEABLE_GALLERY_AD_SEE_MORE = 47
        SWIPEABLE_GALLERY_AD_SITELINK_ONE = 48
        SWIPEABLE_GALLERY_AD_SITELINK_TWO = 49
        SWIPEABLE_GALLERY_AD_SITELINK_THREE = 50
        SWIPEABLE_GALLERY_AD_SITELINK_FOUR = 51
        SWIPEABLE_GALLERY_AD_SITELINK_FIVE = 52
        HOTEL_PRICE = 53
        PRICE_EXTENSION = 54
        HOTEL_BOOK_ON_GOOGLE_ROOM_SELECTION = 55
class CollectionSizeErrorEnum(object):
    """Container wrapper exposing the CollectionSizeError enum (generated code)."""

    class CollectionSizeError(enum.IntEnum):
        """
        Enum describing possible collection size errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          TOO_FEW (int): Too few.
          TOO_MANY (int): Too many.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        TOO_FEW = 2
        TOO_MANY = 3
class ContentLabelTypeEnum(object):
    """Container wrapper exposing the ContentLabelType enum (generated code)."""

    class ContentLabelType(enum.IntEnum):
        """
        Enum listing the content label types supported by ContentLabel criterion.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          SEXUALLY_SUGGESTIVE (int): Sexually suggestive content.
          BELOW_THE_FOLD (int): Below the fold placement.
          PARKED_DOMAIN (int): Parked domain.
          GAME (int): Game.
          JUVENILE (int): Juvenile, gross & bizarre content.
          PROFANITY (int): Profanity & rough language.
          TRAGEDY (int): Death & tragedy.
          VIDEO (int): Video.
          VIDEO_RATING_DV_G (int): Content rating: G.
          VIDEO_RATING_DV_PG (int): Content rating: PG.
          VIDEO_RATING_DV_T (int): Content rating: T.
          VIDEO_RATING_DV_MA (int): Content rating: MA.
          VIDEO_NOT_YET_RATED (int): Content rating: not yet rated.
          EMBEDDED_VIDEO (int): Embedded video.
          LIVE_STREAMING_VIDEO (int): Live streaming video.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        SEXUALLY_SUGGESTIVE = 2
        BELOW_THE_FOLD = 3
        PARKED_DOMAIN = 4
        GAME = 5
        JUVENILE = 6
        PROFANITY = 7
        TRAGEDY = 8
        VIDEO = 9
        VIDEO_RATING_DV_G = 10
        VIDEO_RATING_DV_PG = 11
        VIDEO_RATING_DV_T = 12
        VIDEO_RATING_DV_MA = 13
        VIDEO_NOT_YET_RATED = 14
        EMBEDDED_VIDEO = 15
        LIVE_STREAMING_VIDEO = 16
class ContextErrorEnum(object):
    """Container wrapper exposing the ContextError enum (generated code)."""

    class ContextError(enum.IntEnum):
        """
        Enum describing possible context errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          OPERATION_NOT_PERMITTED_FOR_CONTEXT (int): The operation is not allowed for the given context.
          OPERATION_NOT_PERMITTED_FOR_REMOVED_RESOURCE (int): The operation is not allowed for removed resources.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        OPERATION_NOT_PERMITTED_FOR_CONTEXT = 2
        OPERATION_NOT_PERMITTED_FOR_REMOVED_RESOURCE = 3
class ConversionActionCategoryEnum(object):
    """Container wrapper exposing the ConversionActionCategory enum (generated code)."""

    class ConversionActionCategory(enum.IntEnum):
        """
        The category of conversions that are associated with a ConversionAction.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          DEFAULT (int): Default category.
          PAGE_VIEW (int): User visiting a page.
          PURCHASE (int): Purchase, sales, or "order placed" event.
          SIGNUP (int): Signup user action.
          LEAD (int): Lead-generating action.
          DOWNLOAD (int): Software download action (as for an app).
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        DEFAULT = 2
        PAGE_VIEW = 3
        PURCHASE = 4
        SIGNUP = 5
        LEAD = 6
        DOWNLOAD = 7
class ConversionActionCountingTypeEnum(object):
    """Container wrapper exposing the ConversionActionCountingType enum (generated code)."""

    class ConversionActionCountingType(enum.IntEnum):
        """
        Indicates how conversions for this action will be counted. For more
        information, see https://support.google.com/google-ads/answer/3438531.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          ONE_PER_CLICK (int): Count only one conversion per click.
          MANY_PER_CLICK (int): Count all conversions per click.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        ONE_PER_CLICK = 2
        MANY_PER_CLICK = 3
class ConversionActionErrorEnum(object):
    """Container wrapper exposing the ConversionActionError enum (generated code)."""

    class ConversionActionError(enum.IntEnum):
        """
        Enum describing possible conversion action errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          DUPLICATE_NAME (int): The specified conversion action name already exists.
          DUPLICATE_APP_ID (int): Another conversion action with the specified app id already exists.
          TWO_CONVERSION_ACTIONS_BIDDING_ON_SAME_APP_DOWNLOAD (int): Android first open action conflicts with Google play codeless download
          action tracking the same app.
          BIDDING_ON_SAME_APP_DOWNLOAD_AS_GLOBAL_ACTION (int): Android first open action conflicts with Google play codeless download
          action tracking the same app.
          DATA_DRIVEN_MODEL_WAS_NEVER_GENERATED (int): The attribution model cannot be set to DATA\_DRIVEN because a
          data-driven model has never been generated.
          DATA_DRIVEN_MODEL_EXPIRED (int): The attribution model cannot be set to DATA\_DRIVEN because the
          data-driven model is expired.
          DATA_DRIVEN_MODEL_STALE (int): The attribution model cannot be set to DATA\_DRIVEN because the
          data-driven model is stale.
          DATA_DRIVEN_MODEL_UNKNOWN (int): The attribution model cannot be set to DATA\_DRIVEN because the
          data-driven model is unavailable or the conversion action was newly
          added.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        DUPLICATE_NAME = 2
        DUPLICATE_APP_ID = 3
        TWO_CONVERSION_ACTIONS_BIDDING_ON_SAME_APP_DOWNLOAD = 4
        BIDDING_ON_SAME_APP_DOWNLOAD_AS_GLOBAL_ACTION = 5
        DATA_DRIVEN_MODEL_WAS_NEVER_GENERATED = 6
        DATA_DRIVEN_MODEL_EXPIRED = 7
        DATA_DRIVEN_MODEL_STALE = 8
        DATA_DRIVEN_MODEL_UNKNOWN = 9
class ConversionActionStatusEnum(object):
    """Container wrapper exposing the ConversionActionStatus enum (generated code)."""

    class ConversionActionStatus(enum.IntEnum):
        """
        Possible statuses of a conversion action.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          ENABLED (int): Conversions will be recorded.
          REMOVED (int): Conversions will not be recorded.
          HIDDEN (int): Conversions will not be recorded and the conversion action will not
          appear in the UI.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENABLED = 2
        REMOVED = 3
        HIDDEN = 4
class ConversionActionTypeEnum(object):
    """Container wrapper exposing the ConversionActionType enum (generated code)."""

    class ConversionActionType(enum.IntEnum):
        """
        Possible types of a conversion action.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          AD_CALL (int): Conversions that occur when a user clicks on an ad's call extension.
          CLICK_TO_CALL (int): Conversions that occur when a user on a mobile device clicks a phone
          number.
          GOOGLE_PLAY_DOWNLOAD (int): Conversions that occur when a user downloads a mobile app from the Google
          Play Store.
          GOOGLE_PLAY_IN_APP_PURCHASE (int): Conversions that occur when a user makes a purchase in an app through
          Android billing.
          UPLOAD_CALLS (int): Call conversions that are tracked by the advertiser and uploaded.
          UPLOAD_CLICKS (int): Conversions that are tracked by the advertiser and uploaded with
          attributed clicks.
          WEBPAGE (int): Conversions that occur on a webpage.
          WEBSITE_CALL (int): Conversions that occur when a user calls a dynamically-generated phone
          number from an advertiser's website.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        AD_CALL = 2
        CLICK_TO_CALL = 3
        GOOGLE_PLAY_DOWNLOAD = 4
        GOOGLE_PLAY_IN_APP_PURCHASE = 5
        UPLOAD_CALLS = 6
        UPLOAD_CLICKS = 7
        WEBPAGE = 8
        WEBSITE_CALL = 9
class ConversionAdjustmentTypeEnum(object):
    """Container wrapper exposing the ConversionAdjustmentType enum (generated code)."""

    class ConversionAdjustmentType(enum.IntEnum):
        """
        The different actions advertisers can take to adjust the conversions that
        they already reported. Retractions negate a conversion. Restatements change
        the value of a conversion.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Represents value unknown in this version.
          RETRACTION (int): Negates a conversion so that its total value and count are both zero.
          RESTATEMENT (int): Changes the value of a conversion.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        RETRACTION = 2
        RESTATEMENT = 3
class ConversionAdjustmentUploadErrorEnum(object):
    """Container wrapper exposing the ConversionAdjustmentUploadError enum (generated code)."""

    class ConversionAdjustmentUploadError(enum.IntEnum):
        """
        Enum describing possible conversion adjustment upload errors.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): The received error code is not known in this version.
          TOO_RECENT_CONVERSION_ACTION (int): The specified conversion action was created too recently.
          Please try the upload again after 4-6 hours have passed since the
          conversion action was created.
          INVALID_CONVERSION_ACTION (int): No conversion action of a supported ConversionActionType that matches the
          provided information can be found for the customer.
          CONVERSION_ALREADY_RETRACTED (int): A retraction was already reported for this conversion.
          CONVERSION_NOT_FOUND (int): A conversion for the supplied combination of conversion
          action and conversion identifier could not be found.
          CONVERSION_EXPIRED (int): The specified conversion has already expired. Conversions expire after 55
          days, after which adjustments cannot be reported against them.
          ADJUSTMENT_PRECEDES_CONVERSION (int): The supplied adjustment date time precedes that of the original
          conversion.
          MORE_RECENT_RESTATEMENT_FOUND (int): A restatement with a more recent adjustment date time was already
          reported for this conversion.
          TOO_RECENT_CONVERSION (int): The conversion was created too recently.
          CANNOT_RESTATE_CONVERSION_ACTION_THAT_ALWAYS_USES_DEFAULT_CONVERSION_VALUE (int): Restatements cannot be reported for a conversion action that always uses
          the default value.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        TOO_RECENT_CONVERSION_ACTION = 2
        INVALID_CONVERSION_ACTION = 3
        CONVERSION_ALREADY_RETRACTED = 4
        CONVERSION_NOT_FOUND = 5
        CONVERSION_EXPIRED = 6
        ADJUSTMENT_PRECEDES_CONVERSION = 7
        MORE_RECENT_RESTATEMENT_FOUND = 8
        TOO_RECENT_CONVERSION = 9
        CANNOT_RESTATE_CONVERSION_ACTION_THAT_ALWAYS_USES_DEFAULT_CONVERSION_VALUE = 10
class ConversionAttributionEventTypeEnum(object):
    """Container wrapper exposing the ConversionAttributionEventType enum (generated code)."""

    class ConversionAttributionEventType(enum.IntEnum):
        """
        The event type of conversions that are attributed to.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Represents value unknown in this version.
          IMPRESSION (int): The conversion is attributed to an impression.
          INTERACTION (int): The conversion is attributed to an interaction.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        IMPRESSION = 2
        INTERACTION = 3
class ConversionLagBucketEnum(object):
    """Container wrapper exposing the ConversionLagBucket enum (generated code)."""

    class ConversionLagBucket(enum.IntEnum):
        """
        Enum representing the number of days between impression and conversion.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          LESS_THAN_ONE_DAY (int): Conversion lag bucket from 0 to 1 day. 0 day is included, 1 day is not.
          ONE_TO_TWO_DAYS (int): Conversion lag bucket from 1 to 2 days. 1 day is included, 2 days is not.
          TWO_TO_THREE_DAYS (int): Conversion lag bucket from 2 to 3 days. 2 days is included,
          3 days is not.
          THREE_TO_FOUR_DAYS (int): Conversion lag bucket from 3 to 4 days. 3 days is included,
          4 days is not.
          FOUR_TO_FIVE_DAYS (int): Conversion lag bucket from 4 to 5 days. 4 days is included,
          5 days is not.
          FIVE_TO_SIX_DAYS (int): Conversion lag bucket from 5 to 6 days. 5 days is included,
          6 days is not.
          SIX_TO_SEVEN_DAYS (int): Conversion lag bucket from 6 to 7 days. 6 days is included,
          7 days is not.
          SEVEN_TO_EIGHT_DAYS (int): Conversion lag bucket from 7 to 8 days. 7 days is included,
          8 days is not.
          EIGHT_TO_NINE_DAYS (int): Conversion lag bucket from 8 to 9 days. 8 days is included,
          9 days is not.
          NINE_TO_TEN_DAYS (int): Conversion lag bucket from 9 to 10 days. 9 days is included,
          10 days is not.
          TEN_TO_ELEVEN_DAYS (int): Conversion lag bucket from 10 to 11 days. 10 days is included,
          11 days is not.
          ELEVEN_TO_TWELVE_DAYS (int): Conversion lag bucket from 11 to 12 days. 11 days is included,
          12 days is not.
          TWELVE_TO_THIRTEEN_DAYS (int): Conversion lag bucket from 12 to 13 days. 12 days is included,
          13 days is not.
          THIRTEEN_TO_FOURTEEN_DAYS (int): Conversion lag bucket from 13 to 14 days. 13 days is included,
          14 days is not.
          FOURTEEN_TO_TWENTY_ONE_DAYS (int): Conversion lag bucket from 14 to 21 days. 14 days is included,
          21 days is not.
          TWENTY_ONE_TO_THIRTY_DAYS (int): Conversion lag bucket from 21 to 30 days. 21 days is included,
          30 days is not.
          THIRTY_TO_FORTY_FIVE_DAYS (int): Conversion lag bucket from 30 to 45 days. 30 days is included,
          45 days is not.
          FORTY_FIVE_TO_SIXTY_DAYS (int): Conversion lag bucket from 45 to 60 days. 45 days is included,
          60 days is not.
          SIXTY_TO_NINETY_DAYS (int): Conversion lag bucket from 60 to 90 days. 60 days is included,
          90 days is not.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        LESS_THAN_ONE_DAY = 2
        ONE_TO_TWO_DAYS = 3
        TWO_TO_THREE_DAYS = 4
        THREE_TO_FOUR_DAYS = 5
        FOUR_TO_FIVE_DAYS = 6
        FIVE_TO_SIX_DAYS = 7
        SIX_TO_SEVEN_DAYS = 8
        SEVEN_TO_EIGHT_DAYS = 9
        EIGHT_TO_NINE_DAYS = 10
        NINE_TO_TEN_DAYS = 11
        TEN_TO_ELEVEN_DAYS = 12
        ELEVEN_TO_TWELVE_DAYS = 13
        TWELVE_TO_THIRTEEN_DAYS = 14
        THIRTEEN_TO_FOURTEEN_DAYS = 15
        FOURTEEN_TO_TWENTY_ONE_DAYS = 16
        TWENTY_ONE_TO_THIRTY_DAYS = 17
        THIRTY_TO_FORTY_FIVE_DAYS = 18
        FORTY_FIVE_TO_SIXTY_DAYS = 19
        SIXTY_TO_NINETY_DAYS = 20
class ConversionOrAdjustmentLagBucketEnum(object):
    """Container wrapper exposing the ConversionOrAdjustmentLagBucket enum (generated code)."""

    class ConversionOrAdjustmentLagBucket(enum.IntEnum):
        """
        Enum representing the number of days between the impression and the
        conversion or between the impression and adjustments to the conversion.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          CONVERSION_LESS_THAN_ONE_DAY (int): Conversion lag bucket from 0 to 1 day. 0 day is included, 1 day is not.
          CONVERSION_ONE_TO_TWO_DAYS (int): Conversion lag bucket from 1 to 2 days. 1 day is included, 2 days is not.
          CONVERSION_TWO_TO_THREE_DAYS (int): Conversion lag bucket from 2 to 3 days. 2 days is included,
          3 days is not.
          CONVERSION_THREE_TO_FOUR_DAYS (int): Conversion lag bucket from 3 to 4 days. 3 days is included,
          4 days is not.
          CONVERSION_FOUR_TO_FIVE_DAYS (int): Conversion lag bucket from 4 to 5 days. 4 days is included,
          5 days is not.
          CONVERSION_FIVE_TO_SIX_DAYS (int): Conversion lag bucket from 5 to 6 days. 5 days is included,
          6 days is not.
          CONVERSION_SIX_TO_SEVEN_DAYS (int): Conversion lag bucket from 6 to 7 days. 6 days is included,
          7 days is not.
          CONVERSION_SEVEN_TO_EIGHT_DAYS (int): Conversion lag bucket from 7 to 8 days. 7 days is included,
          8 days is not.
          CONVERSION_EIGHT_TO_NINE_DAYS (int): Conversion lag bucket from 8 to 9 days. 8 days is included,
          9 days is not.
          CONVERSION_NINE_TO_TEN_DAYS (int): Conversion lag bucket from 9 to 10 days. 9 days is included,
          10 days is not.
          CONVERSION_TEN_TO_ELEVEN_DAYS (int): Conversion lag bucket from 10 to 11 days. 10 days is included,
          11 days is not.
          CONVERSION_ELEVEN_TO_TWELVE_DAYS (int): Conversion lag bucket from 11 to 12 days. 11 days is included,
          12 days is not.
          CONVERSION_TWELVE_TO_THIRTEEN_DAYS (int): Conversion lag bucket from 12 to 13 days. 12 days is included,
          13 days is not.
          CONVERSION_THIRTEEN_TO_FOURTEEN_DAYS (int): Conversion lag bucket from 13 to 14 days. 13 days is included,
          14 days is not.
          CONVERSION_FOURTEEN_TO_TWENTY_ONE_DAYS (int): Conversion lag bucket from 14 to 21 days. 14 days is included,
          21 days is not.
          CONVERSION_TWENTY_ONE_TO_THIRTY_DAYS (int): Conversion lag bucket from 21 to 30 days. 21 days is included,
          30 days is not.
          CONVERSION_THIRTY_TO_FORTY_FIVE_DAYS (int): Conversion lag bucket from 30 to 45 days. 30 days is included,
          45 days is not.
          CONVERSION_FORTY_FIVE_TO_SIXTY_DAYS (int): Conversion lag bucket from 45 to 60 days. 45 days is included,
          60 days is not.
          CONVERSION_SIXTY_TO_NINETY_DAYS (int): Conversion lag bucket from 60 to 90 days. 60 days is included,
          90 days is not.
          ADJUSTMENT_LESS_THAN_ONE_DAY (int): Conversion adjustment lag bucket from 0 to 1 day. 0 day is included,
          1 day is not.
          ADJUSTMENT_ONE_TO_TWO_DAYS (int): Conversion adjustment lag bucket from 1 to 2 days. 1 day is included,
          2 days is not.
          ADJUSTMENT_TWO_TO_THREE_DAYS (int): Conversion adjustment lag bucket from 2 to 3 days. 2 days is included,
          3 days is not.
          ADJUSTMENT_THREE_TO_FOUR_DAYS (int): Conversion adjustment lag bucket from 3 to 4 days. 3 days is included,
          4 days is not.
          ADJUSTMENT_FOUR_TO_FIVE_DAYS (int): Conversion adjustment lag bucket from 4 to 5 days. 4 days is included,
          5 days is not.
          ADJUSTMENT_FIVE_TO_SIX_DAYS (int): Conversion adjustment lag bucket from 5 to 6 days. 5 days is included,
          6 days is not.
          ADJUSTMENT_SIX_TO_SEVEN_DAYS (int): Conversion adjustment lag bucket from 6 to 7 days. 6 days is included,
          7 days is not.
          ADJUSTMENT_SEVEN_TO_EIGHT_DAYS (int): Conversion adjustment lag bucket from 7 to 8 days. 7 days is included,
          8 days is not.
          ADJUSTMENT_EIGHT_TO_NINE_DAYS (int): Conversion adjustment lag bucket from 8 to 9 days. 8 days is included,
          9 days is not.
          ADJUSTMENT_NINE_TO_TEN_DAYS (int): Conversion adjustment lag bucket from 9 to 10 days. 9 days is included,
          10 days is not.
          ADJUSTMENT_TEN_TO_ELEVEN_DAYS (int): Conversion adjustment lag bucket from 10 to 11 days. 10 days is included,
          11 days is not.
          ADJUSTMENT_ELEVEN_TO_TWELVE_DAYS (int): Conversion adjustment lag bucket from 11 to 12 days. 11 days is included,
          12 days is not.
          ADJUSTMENT_TWELVE_TO_THIRTEEN_DAYS (int): Conversion adjustment lag bucket from 12 to 13 days. 12 days is included,
          13 days is not.
          ADJUSTMENT_THIRTEEN_TO_FOURTEEN_DAYS (int): Conversion adjustment lag bucket from 13 to 14 days. 13 days is included,
          14 days is not.
          ADJUSTMENT_FOURTEEN_TO_TWENTY_ONE_DAYS (int): Conversion adjustment lag bucket from 14 to 21 days. 14 days is included,
          21 days is not.
          ADJUSTMENT_TWENTY_ONE_TO_THIRTY_DAYS (int): Conversion adjustment lag bucket from 21 to 30 days. 21 days is included,
          30 days is not.
          ADJUSTMENT_THIRTY_TO_FORTY_FIVE_DAYS (int): Conversion adjustment lag bucket from 30 to 45 days. 30 days is included,
          45 days is not.
          ADJUSTMENT_FORTY_FIVE_TO_SIXTY_DAYS (int): Conversion adjustment lag bucket from 45 to 60 days. 45 days is included,
          60 days is not.
          ADJUSTMENT_SIXTY_TO_NINETY_DAYS (int): Conversion adjustment lag bucket from 60 to 90 days. 60 days is included,
          90 days is not.
          ADJUSTMENT_NINETY_TO_ONE_HUNDRED_AND_FORTY_FIVE_DAYS (int): Conversion adjustment lag bucket from 90 to 145 days. 90 days is
          included, 145 days is not.
          CONVERSION_UNKNOWN (int): Conversion lag bucket UNKNOWN. This is for dates before conversion lag
          bucket was available in Google Ads.
          ADJUSTMENT_UNKNOWN (int): Conversion adjustment lag bucket UNKNOWN. This is for dates before
          conversion adjustment lag bucket was available in Google Ads.
        """
        # Values are fixed by the Google Ads API proto; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        CONVERSION_LESS_THAN_ONE_DAY = 2
        CONVERSION_ONE_TO_TWO_DAYS = 3
        CONVERSION_TWO_TO_THREE_DAYS = 4
        CONVERSION_THREE_TO_FOUR_DAYS = 5
        CONVERSION_FOUR_TO_FIVE_DAYS = 6
        CONVERSION_FIVE_TO_SIX_DAYS = 7
        CONVERSION_SIX_TO_SEVEN_DAYS = 8
        CONVERSION_SEVEN_TO_EIGHT_DAYS = 9
        CONVERSION_EIGHT_TO_NINE_DAYS = 10
        CONVERSION_NINE_TO_TEN_DAYS = 11
        CONVERSION_TEN_TO_ELEVEN_DAYS = 12
        CONVERSION_ELEVEN_TO_TWELVE_DAYS = 13
        CONVERSION_TWELVE_TO_THIRTEEN_DAYS = 14
        CONVERSION_THIRTEEN_TO_FOURTEEN_DAYS = 15
        CONVERSION_FOURTEEN_TO_TWENTY_ONE_DAYS = 16
        CONVERSION_TWENTY_ONE_TO_THIRTY_DAYS = 17
        CONVERSION_THIRTY_TO_FORTY_FIVE_DAYS = 18
        CONVERSION_FORTY_FIVE_TO_SIXTY_DAYS = 19
        CONVERSION_SIXTY_TO_NINETY_DAYS = 20
        ADJUSTMENT_LESS_THAN_ONE_DAY = 21
        ADJUSTMENT_ONE_TO_TWO_DAYS = 22
        ADJUSTMENT_TWO_TO_THREE_DAYS = 23
        ADJUSTMENT_THREE_TO_FOUR_DAYS = 24
        ADJUSTMENT_FOUR_TO_FIVE_DAYS = 25
        ADJUSTMENT_FIVE_TO_SIX_DAYS = 26
        ADJUSTMENT_SIX_TO_SEVEN_DAYS = 27
        ADJUSTMENT_SEVEN_TO_EIGHT_DAYS = 28
        ADJUSTMENT_EIGHT_TO_NINE_DAYS = 29
        ADJUSTMENT_NINE_TO_TEN_DAYS = 30
        ADJUSTMENT_TEN_TO_ELEVEN_DAYS = 31
        ADJUSTMENT_ELEVEN_TO_TWELVE_DAYS = 32
        ADJUSTMENT_TWELVE_TO_THIRTEEN_DAYS = 33
        ADJUSTMENT_THIRTEEN_TO_FOURTEEN_DAYS = 34
        ADJUSTMENT_FOURTEEN_TO_TWENTY_ONE_DAYS = 35
        ADJUSTMENT_TWENTY_ONE_TO_THIRTY_DAYS = 36
        ADJUSTMENT_THIRTY_TO_FORTY_FIVE_DAYS = 37
        ADJUSTMENT_FORTY_FIVE_TO_SIXTY_DAYS = 38
        ADJUSTMENT_SIXTY_TO_NINETY_DAYS = 39
        ADJUSTMENT_NINETY_TO_ONE_HUNDRED_AND_FORTY_FIVE_DAYS = 40
        CONVERSION_UNKNOWN = 41
        ADJUSTMENT_UNKNOWN = 42
class ConversionUploadErrorEnum(object):
    # Proto-message-style container class; the usable values live on the
    # nested IntEnum below (generated-code convention in this file).
    class ConversionUploadError(enum.IntEnum):
        """
        Enum describing possible conversion upload errors.
        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          TOO_MANY_CONVERSIONS_IN_REQUEST (int): The request contained more than 2000 conversions.
          UNPARSEABLE_GCLID (int): The specified gclid could not be decoded.
          CONVERSION_PRECEDES_GCLID (int): The specified conversion\_date\_time is before the event time associated
          with the given gclid.
          EXPIRED_GCLID (int): The click associated with the given gclid is either too old to be
          imported or occurred outside of the click through lookback window for the
          specified conversion action.
          TOO_RECENT_GCLID (int): The click associated with the given gclid occurred too recently. Please
          try uploading again after 24 hours have passed since the click occurred.
          GCLID_NOT_FOUND (int): The click associated with the given gclid could not be found in the
          system. This can happen if Google Click IDs are collected for non Google
          Ads clicks.
          UNAUTHORIZED_CUSTOMER (int): The click associated with the given gclid is owned by a customer
          account that the uploading customer does not manage.
          INVALID_CONVERSION_ACTION (int): No upload eligible conversion action that matches the provided
          information can be found for the customer.
          TOO_RECENT_CONVERSION_ACTION (int): The specified conversion action was created too recently.
          Please try the upload again after 4-6 hours have passed since the
          conversion action was created.
          CONVERSION_TRACKING_NOT_ENABLED_AT_IMPRESSION_TIME (int): The click associated with the given gclid does not contain conversion
          tracking information.
          EXTERNAL_ATTRIBUTION_DATA_SET_FOR_NON_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION (int): The specified conversion action does not use an external attribution
          model, but external\_attribution\_data was set.
          EXTERNAL_ATTRIBUTION_DATA_NOT_SET_FOR_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION (int): The specified conversion action uses an external attribution model, but
          external\_attribution\_data or one of its contained fields was not set.
          Both external\_attribution\_credit and external\_attribution\_model must
          be set for externally attributed conversion actions.
          ORDER_ID_NOT_PERMITTED_FOR_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION (int): Order IDs are not supported for conversion actions which use an external
          attribution model.
          ORDER_ID_ALREADY_IN_USE (int): A conversion with the same order id and conversion action combination
          already exists in our system.
          DUPLICATE_ORDER_ID (int): The request contained two or more conversions with the same order id and
          conversion action combination.
          TOO_RECENT_CALL (int): The call occurred too recently. Please try uploading again after 24 hours
          have passed since the call occurred.
          EXPIRED_CALL (int): The click that initiated the call is too old for this conversion to be
          imported.
          CALL_NOT_FOUND (int): The call or the click leading to the call was not found.
          CONVERSION_PRECEDES_CALL (int): The specified conversion\_date\_time is before the
          call\_start\_date\_time.
          CONVERSION_TRACKING_NOT_ENABLED_AT_CALL_TIME (int): The click associated with the call does not contain conversion tracking
          information.
          UNPARSEABLE_CALLERS_PHONE_NUMBER (int): The caller's phone number cannot be parsed. It should be formatted either
          as E.164 "+16502531234", International "+64 3-331 6005" or US national
          number "6502531234".
        """
        # Values mirror the upstream API error codes; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        TOO_MANY_CONVERSIONS_IN_REQUEST = 2
        UNPARSEABLE_GCLID = 3
        CONVERSION_PRECEDES_GCLID = 4
        EXPIRED_GCLID = 5
        TOO_RECENT_GCLID = 6
        GCLID_NOT_FOUND = 7
        UNAUTHORIZED_CUSTOMER = 8
        INVALID_CONVERSION_ACTION = 9
        TOO_RECENT_CONVERSION_ACTION = 10
        CONVERSION_TRACKING_NOT_ENABLED_AT_IMPRESSION_TIME = 11
        EXTERNAL_ATTRIBUTION_DATA_SET_FOR_NON_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION = 12
        EXTERNAL_ATTRIBUTION_DATA_NOT_SET_FOR_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION = 13
        ORDER_ID_NOT_PERMITTED_FOR_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION = 14
        ORDER_ID_ALREADY_IN_USE = 15
        DUPLICATE_ORDER_ID = 16
        TOO_RECENT_CALL = 17
        EXPIRED_CALL = 18
        CALL_NOT_FOUND = 19
        CONVERSION_PRECEDES_CALL = 20
        CONVERSION_TRACKING_NOT_ENABLED_AT_CALL_TIME = 21
        UNPARSEABLE_CALLERS_PHONE_NUMBER = 22
class CountryCodeErrorEnum(object):
    """Namespace wrapper mirroring the CountryCodeErrorEnum proto message."""

    class CountryCodeError(enum.IntEnum):
        """Errors that can be reported for a supplied country code."""

        UNSPECIFIED = 0           # Enum unspecified.
        UNKNOWN = 1               # Error code not known in this client version.
        INVALID_COUNTRY_CODE = 2  # The country code is invalid.
class CriterionCategoryChannelAvailabilityModeEnum(object):
    """Namespace wrapper for CriterionCategoryChannelAvailabilityMode."""

    class CriterionCategoryChannelAvailabilityMode(enum.IntEnum):
        """How widely a criterion category is available across ad channels."""

        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Return-value-only sentinel; unknown in this version.
        # Category is available to campaigns of all channel types/subtypes.
        ALL_CHANNELS = 2
        # Available to one channel type, including all subtypes under it.
        CHANNEL_TYPE_AND_ALL_SUBTYPES = 3
        # Available to one channel type and only specific subtype(s).
        CHANNEL_TYPE_AND_SUBSET_SUBTYPES = 4
class CriterionCategoryLocaleAvailabilityModeEnum(object):
    """Namespace wrapper for CriterionCategoryLocaleAvailabilityMode."""

    class CriterionCategoryLocaleAvailabilityMode(enum.IntEnum):
        """How widely a criterion category is available across locales."""

        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Return-value-only sentinel; unknown in this version.
        ALL_LOCALES = 2  # Available to campaigns of every locale.
        # Available within a list of countries, regardless of language.
        COUNTRY_AND_ALL_LANGUAGES = 3
        # Available within a list of languages, regardless of country.
        LANGUAGE_AND_ALL_COUNTRIES = 4
        # Available within a list of (country, language) pairs.
        COUNTRY_AND_LANGUAGE = 5
class CriterionErrorEnum(object):
    # Proto-message-style container class; the usable values live on the
    # nested IntEnum below (generated-code convention in this file).
    class CriterionError(enum.IntEnum):
        """
        Enum describing possible criterion errors.
        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          CONCRETE_TYPE_REQUIRED (int): Concrete type of criterion is required for CREATE and UPDATE operations.
          INVALID_EXCLUDED_CATEGORY (int): The category requested for exclusion is invalid.
          INVALID_KEYWORD_TEXT (int): Invalid keyword criteria text.
          KEYWORD_TEXT_TOO_LONG (int): Keyword text should be less than 80 chars.
          KEYWORD_HAS_TOO_MANY_WORDS (int): Keyword text has too many words.
          KEYWORD_HAS_INVALID_CHARS (int): Keyword text has invalid characters or symbols.
          INVALID_PLACEMENT_URL (int): Invalid placement URL.
          INVALID_USER_LIST (int): Invalid user list criterion.
          INVALID_USER_INTEREST (int): Invalid user interest criterion.
          INVALID_FORMAT_FOR_PLACEMENT_URL (int): Placement URL has wrong format.
          PLACEMENT_URL_IS_TOO_LONG (int): Placement URL is too long.
          PLACEMENT_URL_HAS_ILLEGAL_CHAR (int): Indicates the URL contains an illegal character.
          PLACEMENT_URL_HAS_MULTIPLE_SITES_IN_LINE (int): Indicates the URL contains multiple comma separated URLs.
          PLACEMENT_IS_NOT_AVAILABLE_FOR_TARGETING_OR_EXCLUSION (int): Indicates the domain is blacklisted.
          INVALID_TOPIC_PATH (int): Invalid topic path.
          INVALID_YOUTUBE_CHANNEL_ID (int): The YouTube Channel Id is invalid.
          INVALID_YOUTUBE_VIDEO_ID (int): The YouTube Video Id is invalid.
          YOUTUBE_VERTICAL_CHANNEL_DEPRECATED (int): Indicates the placement is a YouTube vertical channel, which is no longer
          supported.
          YOUTUBE_DEMOGRAPHIC_CHANNEL_DEPRECATED (int): Indicates the placement is a YouTube demographic channel, which is no
          longer supported.
          YOUTUBE_URL_UNSUPPORTED (int): YouTube urls are not supported in Placement criterion. Use YouTubeChannel
          and YouTubeVideo criterion instead.
          CANNOT_EXCLUDE_CRITERIA_TYPE (int): Criteria type can not be excluded by the customer, like AOL account type
          cannot target site type criteria.
          CANNOT_ADD_CRITERIA_TYPE (int): Criteria type can not be targeted.
          INVALID_PRODUCT_FILTER (int): Product filter in the product criteria has invalid characters. Operand
          and the argument in the filter can not have "==" or "&+".
          PRODUCT_FILTER_TOO_LONG (int): Product filter in the product criteria is translated to a string as
          operand1==argument1&+operand2==argument2, maximum allowed length for the
          string is 255 chars.
          CANNOT_EXCLUDE_SIMILAR_USER_LIST (int): Not allowed to exclude similar user list.
          CANNOT_ADD_CLOSED_USER_LIST (int): Not allowed to target a closed user list.
          CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SEARCH_ONLY_CAMPAIGNS (int): Not allowed to add display only UserLists to search only campaigns.
          CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SEARCH_CAMPAIGNS (int): Not allowed to add display only UserLists to search plus campaigns.
          CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SHOPPING_CAMPAIGNS (int): Not allowed to add display only UserLists to shopping campaigns.
          CANNOT_ADD_USER_INTERESTS_TO_SEARCH_CAMPAIGNS (int): Not allowed to add User interests to search only campaigns.
          CANNOT_SET_BIDS_ON_CRITERION_TYPE_IN_SEARCH_CAMPAIGNS (int): Not allowed to set bids for this criterion type in search campaigns
          CANNOT_ADD_URLS_TO_CRITERION_TYPE_FOR_CAMPAIGN_TYPE (int): Final URLs, URL Templates and CustomParameters cannot be set for the
          criterion types of Gender, AgeRange, UserList, Placement, MobileApp, and
          MobileAppCategory in search campaigns and shopping campaigns.
          INVALID_CUSTOM_AFFINITY (int): Invalid custom affinity criterion.
          INVALID_CUSTOM_INTENT (int): Invalid custom intent criterion.
          INVALID_IP_ADDRESS (int): IP address is not valid.
          INVALID_IP_FORMAT (int): IP format is not valid.
          INVALID_MOBILE_APP (int): Mobile application is not valid.
          INVALID_MOBILE_APP_CATEGORY (int): Mobile application category is not valid.
          INVALID_CRITERION_ID (int): The CriterionId does not exist or is of the incorrect type.
          CANNOT_TARGET_CRITERION (int): The Criterion is not allowed to be targeted.
          CANNOT_TARGET_OBSOLETE_CRITERION (int): The criterion is not allowed to be targeted as it is deprecated.
          CRITERION_ID_AND_TYPE_MISMATCH (int): The CriterionId is not valid for the type.
          INVALID_PROXIMITY_RADIUS (int): Distance for the radius for the proximity criterion is invalid.
          INVALID_PROXIMITY_RADIUS_UNITS (int): Units for the distance for the radius for the proximity criterion is
          invalid.
          INVALID_STREETADDRESS_LENGTH (int): Street address in the address is not valid.
          INVALID_CITYNAME_LENGTH (int): City name in the address is not valid.
          INVALID_REGIONCODE_LENGTH (int): Region code in the address is not valid.
          INVALID_REGIONNAME_LENGTH (int): Region name in the address is not valid.
          INVALID_POSTALCODE_LENGTH (int): Postal code in the address is not valid.
          INVALID_COUNTRY_CODE (int): Country code in the address is not valid.
          INVALID_LATITUDE (int): Latitude for the GeoPoint is not valid.
          INVALID_LONGITUDE (int): Longitude for the GeoPoint is not valid.
          PROXIMITY_GEOPOINT_AND_ADDRESS_BOTH_CANNOT_BE_NULL (int): The Proximity input is not valid. Both address and geoPoint cannot be
          null.
          INVALID_PROXIMITY_ADDRESS (int): The Proximity address cannot be geocoded to a valid lat/long.
          INVALID_USER_DOMAIN_NAME (int): User domain name is not valid.
          CRITERION_PARAMETER_TOO_LONG (int): Length of serialized criterion parameter exceeded size limit.
          AD_SCHEDULE_TIME_INTERVALS_OVERLAP (int): Time interval in the AdSchedule overlaps with another AdSchedule.
          AD_SCHEDULE_INTERVAL_CANNOT_SPAN_MULTIPLE_DAYS (int): AdSchedule time interval cannot span multiple days.
          AD_SCHEDULE_INVALID_TIME_INTERVAL (int): AdSchedule time interval specified is invalid, endTime cannot be earlier
          than startTime.
          AD_SCHEDULE_EXCEEDED_INTERVALS_PER_DAY_LIMIT (int): The number of AdSchedule entries in a day exceeds the limit.
          AD_SCHEDULE_CRITERION_ID_MISMATCHING_FIELDS (int): CriteriaId does not match the interval of the AdSchedule specified.
          CANNOT_BID_MODIFY_CRITERION_TYPE (int): Cannot set bid modifier for this criterion type.
          CANNOT_BID_MODIFY_CRITERION_CAMPAIGN_OPTED_OUT (int): Cannot bid modify criterion, since it is opted out of the campaign.
          CANNOT_BID_MODIFY_NEGATIVE_CRITERION (int): Cannot set bid modifier for a negative criterion.
          BID_MODIFIER_ALREADY_EXISTS (int): Bid Modifier already exists. Use SET operation to update.
          FEED_ID_NOT_ALLOWED (int): Feed Id is not allowed in these Location Groups.
          ACCOUNT_INELIGIBLE_FOR_CRITERIA_TYPE (int): The account may not use the requested criteria type. For example, some
          accounts are restricted to keywords only.
          CRITERIA_TYPE_INVALID_FOR_BIDDING_STRATEGY (int): The requested criteria type cannot be used with campaign or ad group
          bidding strategy.
          CANNOT_EXCLUDE_CRITERION (int): The Criterion is not allowed to be excluded.
          CANNOT_REMOVE_CRITERION (int): The criterion is not allowed to be removed. For example, we cannot remove
          any of the device criterion.
          PRODUCT_SCOPE_TOO_LONG (int): The combined length of product dimension values of the product scope
          criterion is too long.
          PRODUCT_SCOPE_TOO_MANY_DIMENSIONS (int): Product scope contains too many dimensions.
          PRODUCT_PARTITION_TOO_LONG (int): The combined length of product dimension values of the product partition
          criterion is too long.
          PRODUCT_PARTITION_TOO_MANY_DIMENSIONS (int): Product partition contains too many dimensions.
          INVALID_PRODUCT_DIMENSION (int): The product dimension is invalid (e.g. dimension contains illegal value,
          dimension type is represented with wrong class, etc). Product dimension
          value can not contain "==" or "&+".
          INVALID_PRODUCT_DIMENSION_TYPE (int): Product dimension type is either invalid for campaigns of this type or
          cannot be used in the current context. BIDDING\_CATEGORY\_Lx and
          PRODUCT\_TYPE\_Lx product dimensions must be used in ascending order of
          their levels: L1, L2, L3, L4, L5... The levels must be specified
          sequentially and start from L1. Furthermore, an "others" product
          partition cannot be subdivided with a dimension of the same type but of
          a higher level ("others" BIDDING\_CATEGORY\_L3 can be subdivided with
          BRAND but not with BIDDING\_CATEGORY\_L4).
          INVALID_PRODUCT_BIDDING_CATEGORY (int): Bidding categories do not form a valid path in the Shopping bidding
          category taxonomy.
          MISSING_SHOPPING_SETTING (int): ShoppingSetting must be added to the campaign before ProductScope
          criteria can be added.
          INVALID_MATCHING_FUNCTION (int): Matching function is invalid.
          LOCATION_FILTER_NOT_ALLOWED (int): Filter parameters not allowed for location groups targeting.
          LOCATION_FILTER_INVALID (int): Given location filter parameter is invalid for location groups targeting.
          CANNOT_ATTACH_CRITERIA_AT_CAMPAIGN_AND_ADGROUP (int): Criteria type cannot be associated with a campaign and its ad group(s)
          simultaneously.
          HOTEL_LENGTH_OF_STAY_OVERLAPS_WITH_EXISTING_CRITERION (int): Range represented by hotel length of stay's min nights and max nights
          overlaps with an existing criterion.
          HOTEL_ADVANCE_BOOKING_WINDOW_OVERLAPS_WITH_EXISTING_CRITERION (int): Range represented by hotel advance booking window's min days and max days
          overlaps with an existing criterion.
          FIELD_INCOMPATIBLE_WITH_NEGATIVE_TARGETING (int): The field is not allowed to be set when the negative field is set to
          true, e.g. we don't allow bids in negative ad group or campaign criteria.
          INVALID_WEBPAGE_CONDITION (int): The combination of operand and operator in webpage condition is invalid.
          INVALID_WEBPAGE_CONDITION_URL (int): The URL of webpage condition is invalid.
          WEBPAGE_CONDITION_URL_CANNOT_BE_EMPTY (int): The URL of webpage condition cannot be empty or contain white space.
          WEBPAGE_CONDITION_URL_UNSUPPORTED_PROTOCOL (int): The URL of webpage condition contains an unsupported protocol.
          WEBPAGE_CONDITION_URL_CANNOT_BE_IP_ADDRESS (int): The URL of webpage condition cannot be an IP address.
          WEBPAGE_CONDITION_URL_DOMAIN_NOT_CONSISTENT_WITH_CAMPAIGN_SETTING (int): The domain of the URL is not consistent with the domain in campaign
          setting.
          WEBPAGE_CONDITION_URL_CANNOT_BE_PUBLIC_SUFFIX (int): The URL of webpage condition cannot be a public suffix itself.
          WEBPAGE_CONDITION_URL_INVALID_PUBLIC_SUFFIX (int): The URL of webpage condition has an invalid public suffix.
          WEBPAGE_CONDITION_URL_VALUE_TRACK_VALUE_NOT_SUPPORTED (int): Value track parameter is not supported in webpage condition URL.
          WEBPAGE_CRITERION_URL_EQUALS_CAN_HAVE_ONLY_ONE_CONDITION (int): Only one URL-EQUALS webpage condition is allowed in a webpage
          criterion and it cannot be combined with other conditions.
          WEBPAGE_CRITERION_NOT_SUPPORTED_ON_NON_DSA_AD_GROUP (int): A webpage criterion cannot be added to a non-DSA ad group.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CONCRETE_TYPE_REQUIRED = 2
        INVALID_EXCLUDED_CATEGORY = 3
        INVALID_KEYWORD_TEXT = 4
        KEYWORD_TEXT_TOO_LONG = 5
        KEYWORD_HAS_TOO_MANY_WORDS = 6
        KEYWORD_HAS_INVALID_CHARS = 7
        INVALID_PLACEMENT_URL = 8
        INVALID_USER_LIST = 9
        INVALID_USER_INTEREST = 10
        INVALID_FORMAT_FOR_PLACEMENT_URL = 11
        PLACEMENT_URL_IS_TOO_LONG = 12
        PLACEMENT_URL_HAS_ILLEGAL_CHAR = 13
        PLACEMENT_URL_HAS_MULTIPLE_SITES_IN_LINE = 14
        PLACEMENT_IS_NOT_AVAILABLE_FOR_TARGETING_OR_EXCLUSION = 15
        INVALID_TOPIC_PATH = 16
        INVALID_YOUTUBE_CHANNEL_ID = 17
        INVALID_YOUTUBE_VIDEO_ID = 18
        YOUTUBE_VERTICAL_CHANNEL_DEPRECATED = 19
        YOUTUBE_DEMOGRAPHIC_CHANNEL_DEPRECATED = 20
        YOUTUBE_URL_UNSUPPORTED = 21
        CANNOT_EXCLUDE_CRITERIA_TYPE = 22
        CANNOT_ADD_CRITERIA_TYPE = 23
        INVALID_PRODUCT_FILTER = 24
        PRODUCT_FILTER_TOO_LONG = 25
        CANNOT_EXCLUDE_SIMILAR_USER_LIST = 26
        CANNOT_ADD_CLOSED_USER_LIST = 27
        CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SEARCH_ONLY_CAMPAIGNS = 28
        CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SEARCH_CAMPAIGNS = 29
        CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SHOPPING_CAMPAIGNS = 30
        CANNOT_ADD_USER_INTERESTS_TO_SEARCH_CAMPAIGNS = 31
        CANNOT_SET_BIDS_ON_CRITERION_TYPE_IN_SEARCH_CAMPAIGNS = 32
        CANNOT_ADD_URLS_TO_CRITERION_TYPE_FOR_CAMPAIGN_TYPE = 33
        # NOTE(review): the next two values (96, 97) are intentionally out of
        # numeric sequence here; this ordering presumably mirrors the upstream
        # generated proto definition — do not re-sort.
        INVALID_CUSTOM_AFFINITY = 96
        INVALID_CUSTOM_INTENT = 97
        INVALID_IP_ADDRESS = 34
        INVALID_IP_FORMAT = 35
        INVALID_MOBILE_APP = 36
        INVALID_MOBILE_APP_CATEGORY = 37
        INVALID_CRITERION_ID = 38
        CANNOT_TARGET_CRITERION = 39
        CANNOT_TARGET_OBSOLETE_CRITERION = 40
        CRITERION_ID_AND_TYPE_MISMATCH = 41
        INVALID_PROXIMITY_RADIUS = 42
        INVALID_PROXIMITY_RADIUS_UNITS = 43
        INVALID_STREETADDRESS_LENGTH = 44
        INVALID_CITYNAME_LENGTH = 45
        INVALID_REGIONCODE_LENGTH = 46
        INVALID_REGIONNAME_LENGTH = 47
        INVALID_POSTALCODE_LENGTH = 48
        INVALID_COUNTRY_CODE = 49
        INVALID_LATITUDE = 50
        INVALID_LONGITUDE = 51
        PROXIMITY_GEOPOINT_AND_ADDRESS_BOTH_CANNOT_BE_NULL = 52
        INVALID_PROXIMITY_ADDRESS = 53
        INVALID_USER_DOMAIN_NAME = 54
        CRITERION_PARAMETER_TOO_LONG = 55
        AD_SCHEDULE_TIME_INTERVALS_OVERLAP = 56
        AD_SCHEDULE_INTERVAL_CANNOT_SPAN_MULTIPLE_DAYS = 57
        AD_SCHEDULE_INVALID_TIME_INTERVAL = 58
        AD_SCHEDULE_EXCEEDED_INTERVALS_PER_DAY_LIMIT = 59
        AD_SCHEDULE_CRITERION_ID_MISMATCHING_FIELDS = 60
        CANNOT_BID_MODIFY_CRITERION_TYPE = 61
        CANNOT_BID_MODIFY_CRITERION_CAMPAIGN_OPTED_OUT = 62
        CANNOT_BID_MODIFY_NEGATIVE_CRITERION = 63
        BID_MODIFIER_ALREADY_EXISTS = 64
        FEED_ID_NOT_ALLOWED = 65
        ACCOUNT_INELIGIBLE_FOR_CRITERIA_TYPE = 66
        CRITERIA_TYPE_INVALID_FOR_BIDDING_STRATEGY = 67
        CANNOT_EXCLUDE_CRITERION = 68
        CANNOT_REMOVE_CRITERION = 69
        PRODUCT_SCOPE_TOO_LONG = 70
        PRODUCT_SCOPE_TOO_MANY_DIMENSIONS = 71
        PRODUCT_PARTITION_TOO_LONG = 72
        PRODUCT_PARTITION_TOO_MANY_DIMENSIONS = 73
        INVALID_PRODUCT_DIMENSION = 74
        INVALID_PRODUCT_DIMENSION_TYPE = 75
        INVALID_PRODUCT_BIDDING_CATEGORY = 76
        MISSING_SHOPPING_SETTING = 77
        INVALID_MATCHING_FUNCTION = 78
        LOCATION_FILTER_NOT_ALLOWED = 79
        LOCATION_FILTER_INVALID = 80
        CANNOT_ATTACH_CRITERIA_AT_CAMPAIGN_AND_ADGROUP = 81
        HOTEL_LENGTH_OF_STAY_OVERLAPS_WITH_EXISTING_CRITERION = 82
        HOTEL_ADVANCE_BOOKING_WINDOW_OVERLAPS_WITH_EXISTING_CRITERION = 83
        FIELD_INCOMPATIBLE_WITH_NEGATIVE_TARGETING = 84
        INVALID_WEBPAGE_CONDITION = 85
        INVALID_WEBPAGE_CONDITION_URL = 86
        WEBPAGE_CONDITION_URL_CANNOT_BE_EMPTY = 87
        WEBPAGE_CONDITION_URL_UNSUPPORTED_PROTOCOL = 88
        WEBPAGE_CONDITION_URL_CANNOT_BE_IP_ADDRESS = 89
        WEBPAGE_CONDITION_URL_DOMAIN_NOT_CONSISTENT_WITH_CAMPAIGN_SETTING = 90
        WEBPAGE_CONDITION_URL_CANNOT_BE_PUBLIC_SUFFIX = 91
        WEBPAGE_CONDITION_URL_INVALID_PUBLIC_SUFFIX = 92
        WEBPAGE_CONDITION_URL_VALUE_TRACK_VALUE_NOT_SUPPORTED = 93
        WEBPAGE_CRITERION_URL_EQUALS_CAN_HAVE_ONLY_ONE_CONDITION = 94
        WEBPAGE_CRITERION_NOT_SUPPORTED_ON_NON_DSA_AD_GROUP = 95
class CriterionSystemServingStatusEnum(object):
    """Namespace wrapper for CriterionSystemServingStatus."""

    class CriterionSystemServingStatus(enum.IntEnum):
        """System serving statuses a criterion can be in."""

        UNSPECIFIED = 0    # Not specified.
        UNKNOWN = 1        # Value unknown in this client version.
        ELIGIBLE = 2       # Eligible to serve.
        RARELY_SERVED = 3  # Low search volume.
class CriterionTypeEnum(object):
    # Proto-message-style container class; the usable values live on the
    # nested IntEnum below (generated-code convention in this file).
    class CriterionType(enum.IntEnum):
        """
        Enum describing possible criterion types.
        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          KEYWORD (int): Keyword. e.g. 'mars cruise'.
          PLACEMENT (int): Placement, aka Website. e.g. 'www.flowers4sale.com'
          MOBILE_APP_CATEGORY (int): Mobile application categories to target.
          MOBILE_APPLICATION (int): Mobile applications to target.
          DEVICE (int): Devices to target.
          LOCATION (int): Locations to target.
          LISTING_GROUP (int): Listing groups to target.
          AD_SCHEDULE (int): Ad Schedule.
          AGE_RANGE (int): Age range.
          GENDER (int): Gender.
          INCOME_RANGE (int): Income Range.
          PARENTAL_STATUS (int): Parental status.
          YOUTUBE_VIDEO (int): YouTube Video.
          YOUTUBE_CHANNEL (int): YouTube Channel.
          USER_LIST (int): User list.
          PROXIMITY (int): Proximity.
          TOPIC (int): A topic target on the display network (e.g. "Pets & Animals").
          LISTING_SCOPE (int): Listing scope to target.
          LANGUAGE (int): Language.
          IP_BLOCK (int): IpBlock.
          CONTENT_LABEL (int): Content Label for category exclusion.
          CARRIER (int): Carrier.
          USER_INTEREST (int): A category the user is interested in.
          WEBPAGE (int): Webpage criterion for dynamic search ads.
          OPERATING_SYSTEM_VERSION (int): Operating system version.
          APP_PAYMENT_MODEL (int): App payment model.
          MOBILE_DEVICE (int): Mobile device.
          CUSTOM_AFFINITY (int): Custom affinity.
          CUSTOM_INTENT (int): Custom intent.
        """
        # Values mirror the upstream API enum; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        KEYWORD = 2
        PLACEMENT = 3
        MOBILE_APP_CATEGORY = 4
        MOBILE_APPLICATION = 5
        DEVICE = 6
        LOCATION = 7
        LISTING_GROUP = 8
        AD_SCHEDULE = 9
        AGE_RANGE = 10
        GENDER = 11
        INCOME_RANGE = 12
        PARENTAL_STATUS = 13
        YOUTUBE_VIDEO = 14
        YOUTUBE_CHANNEL = 15
        USER_LIST = 16
        PROXIMITY = 17
        TOPIC = 18
        LISTING_SCOPE = 19
        LANGUAGE = 20
        IP_BLOCK = 21
        CONTENT_LABEL = 22
        CARRIER = 23
        USER_INTEREST = 24
        WEBPAGE = 25
        OPERATING_SYSTEM_VERSION = 26
        APP_PAYMENT_MODEL = 27
        MOBILE_DEVICE = 28
        CUSTOM_AFFINITY = 29
        CUSTOM_INTENT = 30
class CustomInterestErrorEnum(object):
    """Namespace wrapper mirroring the CustomInterestErrorEnum proto message."""

    class CustomInterestError(enum.IntEnum):
        """Errors that can occur when mutating custom interests."""

        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code not known in this client version.
        NAME_ALREADY_USED = 2  # Duplicate name (case-insensitive).
        # Remove-member op supplied neither a member ID nor a [type, parameter]
        # pair.
        CUSTOM_INTEREST_MEMBER_ID_AND_TYPE_PARAMETER_NOT_PRESENT_IN_REMOVE = 3
        TYPE_AND_PARAMETER_NOT_FOUND = 4        # [type, parameter] does not exist.
        TYPE_AND_PARAMETER_ALREADY_EXISTED = 5  # [type, parameter] already exists.
        INVALID_CUSTOM_INTEREST_MEMBER_TYPE = 6  # Unsupported member type.
        CANNOT_REMOVE_WHILE_IN_USE = 7  # Still targeted somewhere; cannot remove.
        CANNOT_CHANGE_TYPE = 8          # Custom interest type is immutable.
class CustomInterestMemberTypeEnum(object):
    """Namespace wrapper for CustomInterestMemberType."""

    class CustomInterestMemberType(enum.IntEnum):
        """Kinds of members a custom interest can contain."""

        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Return-value-only sentinel; unknown in this version.
        KEYWORD = 2      # Member is a keyword.
        URL = 3          # Member is a URL.
class CustomInterestStatusEnum(object):
    """Namespace wrapper for CustomInterestStatus."""

    class CustomInterestStatus(enum.IntEnum):
        """Lifecycle statuses of a custom interest."""

        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Return-value-only sentinel; unknown in this version.
        ENABLED = 2      # Enabled; can be targeted.
        REMOVED = 3      # Removed; can no longer be used for targeting.
class CustomInterestTypeEnum(object):
    """Namespace wrapper for CustomInterestType."""

    class CustomInterestType(enum.IntEnum):
        """Kinds of custom interest audience lists."""

        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Return-value-only sentinel; unknown in this version.
        # Brand advertisers define custom affinity audience lists.
        CUSTOM_AFFINITY = 2
        # Advertisers define custom intent audience lists.
        CUSTOM_INTENT = 3
class CustomPlaceholderFieldEnum(object):
    # Proto-message-style container class; the usable values live on the
    # nested IntEnum below (generated-code convention in this file).
    class CustomPlaceholderField(enum.IntEnum):
        """
        Possible values for Custom placeholder fields.
        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          ID (int): Data Type: STRING. Required. Combination ID and ID2 must be unique per
          offer.
          ID2 (int): Data Type: STRING. Combination ID and ID2 must be unique per offer.
          ITEM_TITLE (int): Data Type: STRING. Required. Main headline with product name to be shown
          in dynamic ad.
          ITEM_SUBTITLE (int): Data Type: STRING. Optional text to be shown in the image ad.
          ITEM_DESCRIPTION (int): Data Type: STRING. Optional description of the product to be shown in the
          ad.
          ITEM_ADDRESS (int): Data Type: STRING. Full address of your offer or service, including
          postal code. This will be used to identify the closest product to the
          user when there are multiple offers in the feed that are relevant to the
          user.
          PRICE (int): Data Type: STRING. Price to be shown in the ad.
          Example: "100.00 USD"
          FORMATTED_PRICE (int): Data Type: STRING. Formatted price to be shown in the ad.
          Example: "Starting at $100.00 USD", "$80 - $100"
          SALE_PRICE (int): Data Type: STRING. Sale price to be shown in the ad.
          Example: "80.00 USD"
          FORMATTED_SALE_PRICE (int): Data Type: STRING. Formatted sale price to be shown in the ad.
          Example: "On sale for $80.00", "$60 - $80"
          IMAGE_URL (int): Data Type: URL. Image to be displayed in the ad. Highly recommended for
          image ads.
          ITEM_CATEGORY (int): Data Type: STRING. Used as a recommendation engine signal to serve items
          in the same category.
          FINAL_URLS (int): Data Type: URL\_LIST. Final URLs for the ad when using Upgraded URLs.
          User will be redirected to these URLs when they click on an ad, or when
          they click on a specific product for ads that have multiple products.
          FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
          URLs.
          TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
          CONTEXTUAL_KEYWORDS (int): Data Type: STRING\_LIST. Keywords used for product retrieval.
          ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
          android-app://{package\_id}/{scheme}/{host\_path}. The components are
          defined as follows: package\_id: app ID as specified in Google Play.
          scheme: the scheme to pass to the application. Can be HTTP, or a custom
          scheme. host\_path: identifies the specific content within your
          application.
          SIMILAR_IDS (int): Data Type: STRING\_LIST. List of recommended IDs to show together with
          this item.
          IOS_APP_LINK (int): Data Type: STRING. iOS app link.
          IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
        """
        # Values mirror the upstream API enum; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        ID = 2
        ID2 = 3
        ITEM_TITLE = 4
        ITEM_SUBTITLE = 5
        ITEM_DESCRIPTION = 6
        ITEM_ADDRESS = 7
        PRICE = 8
        FORMATTED_PRICE = 9
        SALE_PRICE = 10
        FORMATTED_SALE_PRICE = 11
        IMAGE_URL = 12
        ITEM_CATEGORY = 13
        FINAL_URLS = 14
        FINAL_MOBILE_URLS = 15
        TRACKING_URL = 16
        CONTEXTUAL_KEYWORDS = 17
        ANDROID_APP_LINK = 18
        SIMILAR_IDS = 19
        IOS_APP_LINK = 20
        IOS_APP_STORE_ID = 21
class CustomerClientLinkErrorEnum(object):
    """Namespace wrapper mirroring the CustomerClientLinkErrorEnum proto message."""

    class CustomerClientLinkError(enum.IntEnum):
        """Errors that can occur when linking client accounts to a manager."""

        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code not known in this client version.
        # Client is already being managed by this customer.
        CLIENT_ALREADY_INVITED_BY_THIS_MANAGER = 2
        # Client is already managed by another manager in the hierarchy.
        CLIENT_ALREADY_MANAGED_IN_HIERARCHY = 3
        # The link would create a cycle in the hierarchy.
        CYCLIC_LINK_NOT_ALLOWED = 4
        # Manager already has the maximum number of linked accounts.
        CUSTOMER_HAS_TOO_MANY_ACCOUNTS = 5
        # Invitor has the maximum number of pending invitations.
        CLIENT_HAS_TOO_MANY_INVITATIONS = 6
        # Hidden status can only be changed on an active link.
        CANNOT_HIDE_OR_UNHIDE_MANAGER_ACCOUNTS = 7
        # Parent manager already has the maximum number of linked accounts.
        CUSTOMER_HAS_TOO_MANY_ACCOUNTS_AT_MANAGER = 8
class CustomerErrorEnum(object):
    """Namespace wrapper mirroring the CustomerErrorEnum proto message."""

    class CustomerError(enum.IntEnum):
        """Errors for requests dealing with Customer.

        Upstream proto note: "Next id: 26".
        """

        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code not known in this client version.
        # Customer status may not be changed from DRAFT or CLOSED; ENABLED
        # requires currency code plus country code and/or time zone to be set.
        STATUS_CHANGE_DISALLOWED = 2
        # CustomerService cannot fetch a customer that is not fully set up.
        ACCOUNT_NOT_SET_UP = 3
class CustomerFeedErrorEnum(object):
    """Namespace wrapper mirroring the CustomerFeedErrorEnum proto message."""

    class CustomerFeedError(enum.IntEnum):
        """Errors that can occur when mutating customer feeds."""

        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code not known in this client version.
        # An active feed already exists for this customer/placeholder type.
        FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 2
        CANNOT_CREATE_FOR_REMOVED_FEED = 3  # The specified feed is removed.
        # CustomerFeed already exists; use update to modify it.
        CANNOT_CREATE_ALREADY_EXISTING_CUSTOMER_FEED = 4
        CANNOT_MODIFY_REMOVED_CUSTOMER_FEED = 5  # Feed was removed.
        INVALID_PLACEHOLDER_TYPE = 6             # Invalid placeholder type.
        # No feed mapping exists for this placeholder type.
        MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE = 7
        # Placeholder is not allowed at the account level.
        PLACEHOLDER_TYPE_NOT_ALLOWED_ON_CUSTOMER_FEED = 8
class CustomerManagerLinkErrorEnum(object):
    """Namespace wrapper mirroring the CustomerManagerLinkErrorEnum proto message."""

    class CustomerManagerLinkError(enum.IntEnum):
        """Errors that can occur when managing customer-manager links."""

        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code not known in this client version.
        NO_PENDING_INVITE = 2  # No pending invitation exists.
        # Same client referenced more than once in one call.
        SAME_CLIENT_MORE_THAN_ONCE_PER_CALL = 3
        # Manager already has the maximum number of linked accounts.
        MANAGER_HAS_MAX_NUMBER_OF_LINKED_ACCOUNTS = 4
        # Account without an active user cannot be unlinked from its manager.
        CANNOT_UNLINK_ACCOUNT_WITHOUT_ACTIVE_USER = 5
        # Account needs at least one active owner before being unlinked.
        CANNOT_REMOVE_LAST_CLIENT_ACCOUNT_OWNER = 6
        # Only account owners may change their permission role.
        CANNOT_CHANGE_ROLE_BY_NON_ACCOUNT_OWNER = 7
        # Link role can only be changed while the link is active.
        CANNOT_CHANGE_ROLE_FOR_NON_ACTIVE_LINK_ACCOUNT = 8
        # Linking would give the parent duplicate children.
        DUPLICATE_CHILD_FOUND = 9
class CustomerMatchUploadKeyTypeEnum(object):
    class CustomerMatchUploadKeyType(enum.IntEnum):
        """Possible customer match upload key types."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        # Matched from customer info: email, phone or physical address.
        CONTACT_INFO = 2
        # Matched from an advertiser-generated and -assigned user id.
        CRM_ID = 3
        MOBILE_ADVERTISING_ID = 4  # Matched from mobile advertising ids.
class DataDrivenModelStatusEnum(object):
    class DataDrivenModelStatus(enum.IntEnum):
        """Data driven model statuses."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        AVAILABLE = 2  # The data driven model is available.
        # Not updated for >= 7 days; still used, but expires if not updated
        # within 30 days.
        STALE = 3
        # Not updated for >= 30 days and unusable, most commonly because the
        # required number of events was not reached in a recent 30-day period.
        EXPIRED = 4
        # Never generated, most commonly because the required number of events
        # was never reached in any 30-day period.
        NEVER_GENERATED = 5
class DatabaseErrorEnum(object):
    class DatabaseError(enum.IntEnum):
        """Possible database errors."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        # Multiple requests tried to modify the same resource at once; retry.
        CONCURRENT_MODIFICATION = 2
class DateErrorEnum(object):
    class DateError(enum.IntEnum):
        """Possible date errors.

        Values are intentionally non-sequential; they mirror the proto.
        """
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        INVALID_FIELD_VALUES_IN_DATE = 2  # Field values are not a valid date.
        INVALID_FIELD_VALUES_IN_DATE_TIME = 3  # Field values are not a valid date time.
        INVALID_STRING_DATE = 4  # String date must be yyyy-mm-dd.
        INVALID_STRING_DATE_TIME_MICROS = 6  # Must be yyyy-mm-dd hh:mm:ss.ssssss.
        INVALID_STRING_DATE_TIME_SECONDS = 11  # Must be yyyy-mm-dd hh:mm:ss.
        INVALID_STRING_DATE_TIME_SECONDS_WITH_OFFSET = 12  # Must be yyyy-mm-dd hh:mm:ss+|-hh:mm.
        EARLIER_THAN_MINIMUM_DATE = 7  # Date is before allowed minimum.
        LATER_THAN_MAXIMUM_DATE = 8  # Date is after allowed maximum.
        DATE_RANGE_MINIMUM_DATE_LATER_THAN_MAXIMUM_DATE = 9  # Range bounds out of order.
        DATE_RANGE_MINIMUM_AND_MAXIMUM_DATES_BOTH_NULL = 10  # Both range dates are null.
class DateRangeErrorEnum(object):
    class DateRangeError(enum.IntEnum):
        """Possible date range errors."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        INVALID_DATE = 2  # Invalid date.
        START_DATE_AFTER_END_DATE = 3  # The start date was after the end date.
        CANNOT_SET_DATE_TO_PAST = 4  # Cannot set date to past time.
        # A date past the system "last" date was used.
        AFTER_MAXIMUM_ALLOWABLE_DATE = 5
        # Start date cannot change on a campaign that has already started.
        CANNOT_MODIFY_START_DATE_IF_ALREADY_STARTED = 6
class DayOfWeekEnum(object):
    class DayOfWeek(enum.IntEnum):
        """Days of the week, e.g. "Monday"."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # The value is unknown in this version.
        MONDAY = 2
        TUESDAY = 3
        WEDNESDAY = 4
        THURSDAY = 5
        FRIDAY = 6
        SATURDAY = 7
        SUNDAY = 8
class DeviceEnum(object):
    class Device(enum.IntEnum):
        """Google Ads devices available for targeting."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # The value is unknown in this version.
        MOBILE = 2  # Mobile devices with full browsers.
        TABLET = 3  # Tablets with full browsers.
        DESKTOP = 4  # Computers.
        OTHER = 5  # Other device types.
class DisplayAdFormatSettingEnum(object):
    class DisplayAdFormatSetting(enum.IntEnum):
        """Display ad format settings."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # The value is unknown in this version.
        ALL_FORMATS = 2  # Text, image and native formats.
        NON_NATIVE = 3  # Text and image formats.
        # Native format: rendering is controlled by the publisher, not Google.
        NATIVE = 4
class DistinctErrorEnum(object):
    class DistinctError(enum.IntEnum):
        """Possible distinct errors."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        DUPLICATE_ELEMENT = 2  # Duplicate element.
        DUPLICATE_TYPE = 3  # Duplicate type.
class DsaPageFeedCriterionFieldEnum(object):
    class DsaPageFeedCriterionField(enum.IntEnum):
        """Dynamic Search Ad Page Feed criterion fields."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        # Data Type: URL or URL_LIST. URL of the web page to target.
        PAGE_URL = 2
        # Data Type: STRING_LIST. Labels used to target ads within the feed.
        LABEL = 3
class EducationPlaceholderFieldEnum(object):
    class EducationPlaceholderField(enum.IntEnum):
        """Education placeholder fields."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        # STRING. Required. PROGRAM ID + LOCATION ID must be unique per offer.
        PROGRAM_ID = 2
        # STRING. PROGRAM ID + LOCATION ID must be unique per offer.
        LOCATION_ID = 3
        # STRING. Required. Main headline with program name for the dynamic ad.
        PROGRAM_NAME = 4
        AREA_OF_STUDY = 5  # STRING. Area of study shown in the dynamic ad.
        PROGRAM_DESCRIPTION = 6  # STRING. Program description for the dynamic ad.
        SCHOOL_NAME = 7  # STRING. School name shown in the dynamic ad.
        ADDRESS = 8  # STRING. Complete school address, including postal code.
        THUMBNAIL_IMAGE_URL = 9  # URL. Image displayed in ads.
        ALTERNATIVE_THUMBNAIL_IMAGE_URL = 10  # URL. Alternative hosted image file.
        # URL_LIST. Required. Final URLs for Upgraded URLs; the more specific
        # the better (e.g. the URL of a specific program and its location).
        FINAL_URLS = 11
        FINAL_MOBILE_URLS = 12  # URL_LIST. Final mobile URLs for Upgraded URLs.
        TRACKING_URL = 13  # URL. Tracking template for Upgraded URLs.
        CONTEXTUAL_KEYWORDS = 14  # STRING_LIST. Keywords for product retrieval.
        # STRING. Android app link, formatted as
        # android-app://{package_id}/{scheme}/{host_path} where package_id is
        # the Google Play app ID, scheme is passed to the app (HTTP or custom),
        # and host_path identifies content within the app.
        ANDROID_APP_LINK = 15
        # STRING_LIST. Recommended program IDs to show with this item.
        SIMILAR_PROGRAM_IDS = 16
        IOS_APP_LINK = 17  # STRING. iOS app link.
        IOS_APP_STORE_ID = 18  # INT64. iOS app store ID.
class EnumErrorEnum(object):
    class EnumError(enum.IntEnum):
        """Possible enum errors.

        Value 2 is intentionally absent; values mirror the proto.
        """
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        ENUM_VALUE_NOT_PERMITTED = 3  # The enum value is not permitted.
class ExtensionFeedItemErrorEnum(object):
    class ExtensionFeedItemError(enum.IntEnum):
        """Possible extension feed item errors."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        VALUE_OUT_OF_RANGE = 2  # Value is not within the accepted range.
        URL_LIST_TOO_LONG = 3  # Url list is too long.
        # Geo targeting restriction requires geo targeting to be present.
        CANNOT_HAVE_RESTRICTION_ON_EMPTY_GEO_TARGETING = 4
        # Sitelink field cannot be set together with final urls.
        CANNOT_SET_WITH_FINAL_URLS = 5
        CANNOT_SET_WITHOUT_FINAL_URLS = 6  # Must set field with final urls.
        INVALID_PHONE_NUMBER = 7  # Call extension phone number is invalid.
        # Call extension phone number unsupported for the given country code.
        PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY = 8
        # Carrier-specific short-format numbers not allowed for call extensions.
        CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED = 9
        # Premium rate numbers not allowed for call extensions.
        PREMIUM_RATE_NUMBER_NOT_ALLOWED = 10
        # Phone number type not allowed (e.g. personal numbers in most regions).
        DISALLOWED_NUMBER_TYPE = 11
        # Phone number does not meet domestic format requirements.
        INVALID_DOMESTIC_PHONE_NUMBER_FORMAT = 12
        # Vanity numbers (containing letters) not allowed for call extensions.
        VANITY_PHONE_NUMBER_NOT_ALLOWED = 13
        INVALID_CALL_CONVERSION_ACTION = 14  # Invalid call conversion action.
        # Customer is not whitelisted for call tracking.
        CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING = 15
        # Call tracking not supported for the given country.
        CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 16
        # Call recording consent required to create/update call feed items.
        CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED = 17
        INVALID_APP_ID = 18  # App extension app id is invalid.
        # Quotation marks present in the review extension text.
        QUOTES_IN_REVIEW_EXTENSION_SNIPPET = 19
        # Hyphen character present in the review extension text.
        HYPHENS_IN_REVIEW_EXTENSION_SNIPPET = 20
        # Blacklisted review source name or url was provided.
        REVIEW_EXTENSION_SOURCE_INELIGIBLE = 21
        # Review source name must not appear in the review text.
        SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT = 22
        INCONSISTENT_CURRENCY_CODES = 23  # Inconsistent currency codes.
        # Price extension cannot have duplicated headers.
        PRICE_EXTENSION_HAS_DUPLICATED_HEADERS = 24
        # Price item cannot have duplicated header and description.
        PRICE_ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION = 25
        PRICE_EXTENSION_HAS_TOO_FEW_ITEMS = 26  # Too few price items.
        PRICE_EXTENSION_HAS_TOO_MANY_ITEMS = 27  # Too many price items.
        UNSUPPORTED_VALUE = 28  # Input value not currently supported.
        # Input value not supported in the selected extension language.
        UNSUPPORTED_VALUE_IN_SELECTED_LANGUAGE = 29
        INVALID_DEVICE_PREFERENCE = 30  # Unknown or unsupported device preference.
        # Invalid schedule end (e.g. endHour = 24 with endMinute != 0).
        INVALID_SCHEDULE_END = 31
        # Date time zone must match the account's time zone.
        DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE = 32
        INVALID_SNIPPETS_HEADER = 33  # Invalid structured snippet header.
        CANNOT_OPERATE_ON_REMOVED_FEED_ITEM = 34  # Feed item is removed.
        # Phone number unsupported when call tracking enabled for country.
        PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY = 35
        # Cannot set call_conversion_action while
        # call_conversion_tracking_enabled is true.
        CONFLICTING_CALL_CONVERSION_SETTINGS = 36
        # Input extension type doesn't match the existing feed item's type.
        EXTENSION_TYPE_MISMATCH = 37
        # The oneof extension field (feed item subtype) is required.
        EXTENSION_SUBTYPE_REQUIRED = 38
        # Feed item is not mapped to a supported extension type.
        EXTENSION_TYPE_UNSUPPORTED = 39
        # Feed has more than one active FeedMapping.
        CANNOT_OPERATE_ON_FEED_WITH_MULTIPLE_MAPPINGS = 40
        CANNOT_OPERATE_ON_FEED_WITH_KEY_ATTRIBUTES = 41  # Feed has key attributes.
        INVALID_PRICE_FORMAT = 42  # Input price format is invalid.
        PROMOTION_INVALID_TIME = 43  # The promotion time is invalid.
        TOO_MANY_DECIMAL_PLACES_SPECIFIED = 44  # Too many decimal places.
class ExtensionSettingDeviceEnum(object):
    class ExtensionSettingDevice(enum.IntEnum):
        """Possible device types for an extension setting."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # The value is unknown in this version.
        # Extensions in this setting serve only on mobile devices.
        MOBILE = 2
        # Extensions in this setting serve only on desktop devices.
        DESKTOP = 3
class ExtensionSettingErrorEnum(object):
    class ExtensionSettingError(enum.IntEnum):
        """Possible extension setting errors.

        Values 44 and 46 are intentionally absent; values mirror the proto.
        """
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        # Platform restriction given without input or existing extensions.
        EXTENSIONS_REQUIRED = 2
        # Feed type does not correspond to the provided extensions.
        FEED_TYPE_EXTENSION_TYPE_MISMATCH = 3
        INVALID_FEED_TYPE = 4  # The provided feed type cannot be used.
        # The provided feed type cannot be used at the customer level.
        INVALID_FEED_TYPE_FOR_CUSTOMER_EXTENSION_SETTING = 5
        # Cannot change a feed item field on a CREATE operation.
        CANNOT_CHANGE_FEED_ITEM_ON_CREATE = 6
        # Cannot update an extension not already in this setting.
        CANNOT_UPDATE_NEWLY_CREATED_EXTENSION = 7
        # No existing AdGroupExtensionSetting for this type.
        NO_EXISTING_AD_GROUP_EXTENSION_SETTING_FOR_TYPE = 8
        # No existing CampaignExtensionSetting for this type.
        NO_EXISTING_CAMPAIGN_EXTENSION_SETTING_FOR_TYPE = 9
        # No existing CustomerExtensionSetting for this type.
        NO_EXISTING_CUSTOMER_EXTENSION_SETTING_FOR_TYPE = 10
        # AdGroupExtensionSetting already exists; use UPDATE to modify it.
        AD_GROUP_EXTENSION_SETTING_ALREADY_EXISTS = 11
        # CampaignExtensionSetting already exists; use UPDATE to modify it.
        CAMPAIGN_EXTENSION_SETTING_ALREADY_EXISTS = 12
        # CustomerExtensionSetting already exists; use UPDATE to modify it.
        CUSTOMER_EXTENSION_SETTING_ALREADY_EXISTS = 13
        # Active ad group feed already exists for this placeholder type.
        AD_GROUP_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 14
        # Active campaign feed already exists for this placeholder type.
        CAMPAIGN_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 15
        # Active customer feed already exists for this placeholder type.
        CUSTOMER_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 16
        VALUE_OUT_OF_RANGE = 17  # Value is not within the accepted range.
        # Specified field cannot be set together with final urls.
        CANNOT_SET_FIELD_WITH_FINAL_URLS = 18
        FINAL_URLS_NOT_SET = 19  # Must set field with final urls.
        INVALID_PHONE_NUMBER = 20  # Call extension phone number is invalid.
        # Call extension phone number unsupported for the given country code.
        PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY = 21
        # Carrier-specific short-format numbers not allowed for call extensions.
        CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED = 22
        # Premium rate numbers not allowed for call extensions.
        PREMIUM_RATE_NUMBER_NOT_ALLOWED = 23
        DISALLOWED_NUMBER_TYPE = 24  # Phone number type not allowed.
        # Phone number does not meet domestic format requirements.
        INVALID_DOMESTIC_PHONE_NUMBER_FORMAT = 25
        # Vanity numbers (containing letters) not allowed for call extensions.
        VANITY_PHONE_NUMBER_NOT_ALLOWED = 26
        INVALID_COUNTRY_CODE = 27  # Call extension country code is invalid.
        # Call conversion type id for a call extension is invalid.
        INVALID_CALL_CONVERSION_TYPE_ID = 28
        # Customer is not whitelisted for call tracking.
        CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING = 29
        # Call tracking not supported for the given country.
        CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 30
        INVALID_APP_ID = 31  # App extension app id is invalid.
        # Quotation marks present in the review extension text.
        QUOTES_IN_REVIEW_EXTENSION_SNIPPET = 32
        # Hyphen character present in the review extension text.
        HYPHENS_IN_REVIEW_EXTENSION_SNIPPET = 33
        # Blacklisted review source name or url was provided.
        REVIEW_EXTENSION_SOURCE_NOT_ELIGIBLE = 34
        # Review source name must not appear in the review text.
        SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT = 35
        MISSING_FIELD = 36  # Field must be set.
        INCONSISTENT_CURRENCY_CODES = 37  # Inconsistent currency codes.
        # Price extension cannot have duplicated headers.
        PRICE_EXTENSION_HAS_DUPLICATED_HEADERS = 38
        # Price item cannot have duplicated header and description.
        PRICE_ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION = 39
        PRICE_EXTENSION_HAS_TOO_FEW_ITEMS = 40  # Too few price items.
        PRICE_EXTENSION_HAS_TOO_MANY_ITEMS = 41  # Too many price items.
        UNSUPPORTED_VALUE = 42  # Input value not currently supported.
        INVALID_DEVICE_PREFERENCE = 43  # Unknown or unsupported device preference.
        # Invalid schedule end (e.g. endHour = 24 with endMinute != 0).
        INVALID_SCHEDULE_END = 45
        # Date time zone must match the account's time zone.
        DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE = 47
        # Overlapping schedule times (e.g. 7-10AM and 8-11AM) not allowed.
        OVERLAPPING_SCHEDULES_NOT_ALLOWED = 48
        SCHEDULE_END_NOT_AFTER_START = 49  # End time must be after start time.
        TOO_MANY_SCHEDULES_PER_DAY = 50  # Too many feed item schedules per day.
        # Cannot edit the same extension feed item twice in one request.
        DUPLICATE_EXTENSION_FEED_ITEM_EDIT = 51
        INVALID_SNIPPETS_HEADER = 52  # Invalid structured snippet header.
        # Phone number with call tracking unsupported for the given country.
        PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY = 53
        # The targeted ad group must belong to the targeted campaign.
        CAMPAIGN_TARGETING_MISMATCH = 54
        # The underlying feed is removed; remove the ExtensionSetting so a new
        # one can be created with an active feed.
        CANNOT_OPERATE_ON_REMOVED_FEED = 55
        # The ExtensionFeedItem type is required for this operation.
        EXTENSION_TYPE_REQUIRED = 56
        # The matching function linking the feed to the customer, campaign or
        # ad group is incompatible with the ExtensionSetting services.
        INCOMPATIBLE_UNDERLYING_MATCHING_FUNCTION = 57
        START_DATE_AFTER_END_DATE = 58  # Start date must be before end date.
        INVALID_PRICE_FORMAT = 59  # Input price format is invalid.
        PROMOTION_INVALID_TIME = 60  # The promotion time is invalid.
        # Cannot set both percent discount and money discount.
        PROMOTION_CANNOT_SET_PERCENT_DISCOUNT_AND_MONEY_DISCOUNT = 61
        # Cannot set both promotion code and orders-over-amount.
        PROMOTION_CANNOT_SET_PROMOTION_CODE_AND_ORDERS_OVER_AMOUNT = 62
        TOO_MANY_DECIMAL_PLACES_SPECIFIED = 63  # Too many decimal places.
        INVALID_LANGUAGE_CODE = 64  # The language code is not valid.
        UNSUPPORTED_LANGUAGE = 65  # The language is not supported.
        # Call recording consent required to add/update call extensions.
        CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED = 66
        # UPDATE specifies only the resource name in the update mask.
        EXTENSION_SETTING_UPDATE_IS_A_NOOP = 67
class ExtensionTypeEnum(object):
    class ExtensionType(enum.IntEnum):
        """Possible data types for an extension in an extension setting."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        NONE = 2
        APP = 3
        CALL = 4
        CALLOUT = 5
        MESSAGE = 6
        PRICE = 7
        PROMOTION = 8
        REVIEW = 9
        SITELINK = 10
        STRUCTURED_SNIPPET = 11
class ExternalConversionSourceEnum(object):
    class ExternalConversionSource(enum.IntEnum):
        """External conversion source associated with a ConversionAction.

        Inline comments give the Google Ads UI display name for each source.
        """
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Value unknown in this version.
        # User reaches a webpage after viewing an ad; UI: 'Website'.
        WEBPAGE = 2
        # Linked Google Analytics goal or transaction; UI: 'Analytics'.
        ANALYTICS = 3
        # Website conversion via ConversionUploadService; UI: 'Import from clicks'.
        UPLOAD = 4
        # Click on a call extension directly on an ad; UI: 'Calls from ads'.
        AD_CALL_METRICS = 5
        # Call to a dynamically-generated number (installed javascript) from the
        # advertiser's site after an ad click; UI: 'Calls from website'.
        WEBSITE_CALL_METRICS = 6
        # Visit to the advertiser's retail store after an ad click; UI: 'Store visits'.
        STORE_VISITS = 7
        # In-app action (e.g. purchase) in an Android app; UI: 'Android in-app action'.
        ANDROID_IN_APP = 8
        # In-app action (e.g. purchase) in an iOS app; UI: 'iOS in-app action'.
        IOS_IN_APP = 9
        # First open of an iOS app; UI: 'iOS app install (first open)'.
        IOS_FIRST_OPEN = 10
        # Legacy app conversions without an AppPlatform; UI: 'Mobile app'.
        APP_UNSPECIFIED = 11
        # First open of an Android app; UI: 'Android app install (first open)'.
        ANDROID_FIRST_OPEN = 12
        # Call conversion via ConversionUploadService; UI: 'Import from calls'.
        UPLOAD_CALLS = 13
        # Linked Firebase event; UI: 'Firebase'.
        FIREBASE = 14
        # Click on a mobile phone number; UI: 'Phone number clicks'.
        CLICK_TO_CALL = 15
        # From Salesforce; UI: 'Salesforce.com'.
        SALESFORCE = 16
        # In-store purchases recorded by CRM; UI: 'Store sales (data partner)'.
        STORE_SALES_CRM = 17
        # In-store purchases from payment network; UI: 'Store sales (payment network)'.
        STORE_SALES_PAYMENT_NETWORK = 18
        # Codeless Google Play conversion; UI: 'Google Play'.
        GOOGLE_PLAY = 19
        # Linked third-party app analytics event; UI: 'Third-party app analytics'.
        THIRD_PARTY_APP_ANALYTICS = 20
        # Conversion controlled by Google Attribution.
        GOOGLE_ATTRIBUTION = 21
        # Store Sales from first/third-party merchant data uploads;
        # UI: 'Store sales (direct)'.
        STORE_SALES_DIRECT = 22
class FeedAttributeOperation(object):
    class Operator(enum.IntEnum):
        """The operator."""
        UNSPECIFIED = 0  # Unspecified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        ADD = 2  # Add the attribute to the existing attributes.
class FeedAttributeReferenceErrorEnum(object):

    class FeedAttributeReferenceError(enum.IntEnum):
        """Enum describing possible feed attribute reference errors."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        CANNOT_REFERENCE_REMOVED_FEED = 2  # A feed referenced by ID has been removed.
        INVALID_FEED_NAME = 3  # There is no enabled feed with the given name.
        INVALID_FEED_ATTRIBUTE_NAME = 4  # No feed attribute in an enabled feed with the given name.
class FeedAttributeTypeEnum(object):

    class FeedAttributeType(enum.IntEnum):
        """Possible data types for a feed attribute."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        INT64 = 2  # Int64.
        DOUBLE = 3  # Double.
        STRING = 4  # String.
        BOOLEAN = 5  # Boolean.
        URL = 6  # Url.
        DATE_TIME = 7  # Datetime.
        INT64_LIST = 8  # Int64 list.
        DOUBLE_LIST = 9  # Double (8 bytes) list.
        STRING_LIST = 10  # String list.
        BOOLEAN_LIST = 11  # Boolean list.
        URL_LIST = 12  # Url list.
        DATE_TIME_LIST = 13  # Datetime list.
        PRICE = 14  # Price.
class FeedErrorEnum(object):
    # NOTE(review): auto-generated enum wrapper; the integer values look like
    # proto wire values — do not renumber or reorder by hand, regenerate instead.

    class FeedError(enum.IntEnum):
        """
        Enum describing possible feed errors.

        Attributes:
            UNSPECIFIED (int): Enum unspecified.
            UNKNOWN (int): The received error code is not known in this version.
            ATTRIBUTE_NAMES_NOT_UNIQUE (int): The names of the FeedAttributes must be unique.
            ATTRIBUTES_DO_NOT_MATCH_EXISTING_ATTRIBUTES (int): The attribute list must be an exact copy of the existing list if the
                attribute ID's are present.
            CANNOT_SPECIFY_USER_ORIGIN_FOR_SYSTEM_FEED (int): Cannot specify USER origin for a system generated feed.
            CANNOT_SPECIFY_GOOGLE_ORIGIN_FOR_NON_SYSTEM_FEED (int): Cannot specify GOOGLE origin for a non-system generated feed.
            CANNOT_SPECIFY_FEED_ATTRIBUTES_FOR_SYSTEM_FEED (int): Cannot specify feed attributes for system feed.
            CANNOT_UPDATE_FEED_ATTRIBUTES_WITH_ORIGIN_GOOGLE (int): Cannot update FeedAttributes on feed with origin GOOGLE.
            FEED_REMOVED (int): The given ID refers to a removed Feed. Removed Feeds are immutable.
            INVALID_ORIGIN_VALUE (int): The origin of the feed is not valid for the client.
            FEED_ORIGIN_IS_NOT_USER (int): A user can only create and modify feeds with USER origin.
            INVALID_AUTH_TOKEN_FOR_EMAIL (int): Invalid auth token for the given email.
            INVALID_EMAIL (int): Invalid email specified.
            DUPLICATE_FEED_NAME (int): Feed name matches that of another active Feed.
            INVALID_FEED_NAME (int): Name of feed is not allowed.
            MISSING_OAUTH_INFO (int): Missing OAuthInfo.
            NEW_ATTRIBUTE_CANNOT_BE_PART_OF_UNIQUE_KEY (int): New FeedAttributes must not affect the unique key.
            TOO_MANY_ATTRIBUTES (int): Too many FeedAttributes for a Feed.
            INVALID_BUSINESS_ACCOUNT (int): The business account is not valid.
            BUSINESS_ACCOUNT_CANNOT_ACCESS_LOCATION_ACCOUNT (int): Business account cannot access Google My Business account.
            INVALID_AFFILIATE_CHAIN_ID (int): Invalid chain ID provided for affiliate location feed.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        ATTRIBUTE_NAMES_NOT_UNIQUE = 2
        ATTRIBUTES_DO_NOT_MATCH_EXISTING_ATTRIBUTES = 3
        CANNOT_SPECIFY_USER_ORIGIN_FOR_SYSTEM_FEED = 4
        CANNOT_SPECIFY_GOOGLE_ORIGIN_FOR_NON_SYSTEM_FEED = 5
        CANNOT_SPECIFY_FEED_ATTRIBUTES_FOR_SYSTEM_FEED = 6
        CANNOT_UPDATE_FEED_ATTRIBUTES_WITH_ORIGIN_GOOGLE = 7
        FEED_REMOVED = 8
        INVALID_ORIGIN_VALUE = 9
        FEED_ORIGIN_IS_NOT_USER = 10
        INVALID_AUTH_TOKEN_FOR_EMAIL = 11
        INVALID_EMAIL = 12
        DUPLICATE_FEED_NAME = 13
        INVALID_FEED_NAME = 14
        MISSING_OAUTH_INFO = 15
        NEW_ATTRIBUTE_CANNOT_BE_PART_OF_UNIQUE_KEY = 16
        TOO_MANY_ATTRIBUTES = 17
        INVALID_BUSINESS_ACCOUNT = 18
        BUSINESS_ACCOUNT_CANNOT_ACCESS_LOCATION_ACCOUNT = 19
        INVALID_AFFILIATE_CHAIN_ID = 20
class FeedItemErrorEnum(object):

    class FeedItemError(enum.IntEnum):
        """Enum describing possible feed item errors."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        CANNOT_CONVERT_ATTRIBUTE_VALUE_FROM_STRING = 2  # Cannot convert the feed attribute value from string to its real type.
        CANNOT_OPERATE_ON_REMOVED_FEED_ITEM = 3  # Cannot operate on removed feed item.
        DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE = 4  # Date time zone does not match the account's time zone.
        KEY_ATTRIBUTES_NOT_FOUND = 5  # Feed item with the key attributes could not be found.
        INVALID_URL = 6  # Url feed attribute value is not valid.
        MISSING_KEY_ATTRIBUTES = 7  # Some key attributes are missing.
        KEY_ATTRIBUTES_NOT_UNIQUE = 8  # Feed item has same key attributes as another feed item.
        CANNOT_MODIFY_KEY_ATTRIBUTE_VALUE = 9  # Cannot modify key attributes on an existing feed item.
        SIZE_TOO_LARGE_FOR_MULTI_VALUE_ATTRIBUTE = 10  # The feed attribute value is too large.
class FeedItemQualityApprovalStatusEnum(object):

    class FeedItemQualityApprovalStatus(enum.IntEnum):
        """The possible quality evaluation approval statuses of a feed item."""
        UNSPECIFIED = 0  # No value has been specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        APPROVED = 2  # Meets all quality expectations.
        # Does not meet some quality expectations; the specific reason is
        # found in the quality_disapproval_reasons field.
        DISAPPROVED = 3
class FeedItemQualityDisapprovalReasonEnum(object):
    # NOTE(review): auto-generated enum wrapper; values appear to mirror the
    # upstream proto — do not renumber or reorder by hand.

    class FeedItemQualityDisapprovalReason(enum.IntEnum):
        """
        The possible quality evaluation disapproval reasons of a feed item.

        Attributes:
            UNSPECIFIED (int): No value has been specified.
            UNKNOWN (int): Used for return value only. Represents value unknown in this version.
            PRICE_TABLE_REPETITIVE_HEADERS (int): Price contains repetitive headers.
            PRICE_TABLE_REPETITIVE_DESCRIPTION (int): Price contains repetitive description.
            PRICE_TABLE_INCONSISTENT_ROWS (int): Price contains inconsistent items.
            PRICE_DESCRIPTION_HAS_PRICE_QUALIFIERS (int): Price contains qualifiers in description.
            PRICE_UNSUPPORTED_LANGUAGE (int): Price contains an unsupported language.
            PRICE_TABLE_ROW_HEADER_TABLE_TYPE_MISMATCH (int): Price item header is not relevant to the price type.
            PRICE_TABLE_ROW_HEADER_HAS_PROMOTIONAL_TEXT (int): Price item header has promotional text.
            PRICE_TABLE_ROW_DESCRIPTION_NOT_RELEVANT (int): Price item description is not relevant to the item header.
            PRICE_TABLE_ROW_DESCRIPTION_HAS_PROMOTIONAL_TEXT (int): Price item description contains promotional text.
            PRICE_TABLE_ROW_HEADER_DESCRIPTION_REPETITIVE (int): Price item header and description are repetitive.
            PRICE_TABLE_ROW_UNRATEABLE (int): Price item is in a foreign language, nonsense, or can't be rated.
            PRICE_TABLE_ROW_PRICE_INVALID (int): Price item price is invalid or inaccurate.
            PRICE_TABLE_ROW_URL_INVALID (int): Price item URL is invalid or irrelevant.
            PRICE_HEADER_OR_DESCRIPTION_HAS_PRICE (int): Price item header or description has price.
            STRUCTURED_SNIPPETS_HEADER_POLICY_VIOLATED (int): Structured snippet values do not match the header.
            STRUCTURED_SNIPPETS_REPEATED_VALUES (int): Structured snippet values are repeated.
            STRUCTURED_SNIPPETS_EDITORIAL_GUIDELINES (int): Structured snippet values violate editorial guidelines like punctuation.
            STRUCTURED_SNIPPETS_HAS_PROMOTIONAL_TEXT (int): Structured snippet contain promotional text.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        PRICE_TABLE_REPETITIVE_HEADERS = 2
        PRICE_TABLE_REPETITIVE_DESCRIPTION = 3
        PRICE_TABLE_INCONSISTENT_ROWS = 4
        PRICE_DESCRIPTION_HAS_PRICE_QUALIFIERS = 5
        PRICE_UNSUPPORTED_LANGUAGE = 6
        PRICE_TABLE_ROW_HEADER_TABLE_TYPE_MISMATCH = 7
        PRICE_TABLE_ROW_HEADER_HAS_PROMOTIONAL_TEXT = 8
        PRICE_TABLE_ROW_DESCRIPTION_NOT_RELEVANT = 9
        PRICE_TABLE_ROW_DESCRIPTION_HAS_PROMOTIONAL_TEXT = 10
        PRICE_TABLE_ROW_HEADER_DESCRIPTION_REPETITIVE = 11
        PRICE_TABLE_ROW_UNRATEABLE = 12
        PRICE_TABLE_ROW_PRICE_INVALID = 13
        PRICE_TABLE_ROW_URL_INVALID = 14
        PRICE_HEADER_OR_DESCRIPTION_HAS_PRICE = 15
        STRUCTURED_SNIPPETS_HEADER_POLICY_VIOLATED = 16
        STRUCTURED_SNIPPETS_REPEATED_VALUES = 17
        STRUCTURED_SNIPPETS_EDITORIAL_GUIDELINES = 18
        STRUCTURED_SNIPPETS_HAS_PROMOTIONAL_TEXT = 19
class FeedItemStatusEnum(object):

    class FeedItemStatus(enum.IntEnum):
        """Possible statuses of a feed item."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        ENABLED = 2  # Feed item is enabled.
        REMOVED = 3  # Feed item has been removed.
class FeedItemTargetDeviceEnum(object):

    class FeedItemTargetDevice(enum.IntEnum):
        """Possible data types for a feed item target device."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        MOBILE = 2  # Mobile.
class FeedItemTargetErrorEnum(object):

    class FeedItemTargetError(enum.IntEnum):
        """Enum describing possible feed item target errors."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        # On CREATE, the FeedItemTarget must have a populated field in the
        # oneof target.
        MUST_SET_TARGET_ONEOF_ON_CREATE = 2
        FEED_ITEM_TARGET_ALREADY_EXISTS = 3  # The specified feed item target already exists, so it cannot be added.
        FEED_ITEM_SCHEDULES_CANNOT_OVERLAP = 4  # The schedules for a given feed item cannot overlap.
        TARGET_LIMIT_EXCEEDED_FOR_GIVEN_TYPE = 5  # Too many targets of a given type were added for a single feed item.
        TOO_MANY_SCHEDULES_PER_DAY = 6  # Too many AdSchedules are enabled for the feed item for the given day.
        # A feed item may either have an enabled campaign target or an enabled
        # ad group target.
        CANNOT_HAVE_ENABLED_CAMPAIGN_AND_ENABLED_AD_GROUP_TARGETS = 7
class FeedItemTargetTypeEnum(object):

    class FeedItemTargetType(enum.IntEnum):
        """Possible type of a feed item target."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        CAMPAIGN = 2  # Feed item targets a campaign.
        AD_GROUP = 3  # Feed item targets an ad group.
        CRITERION = 4  # Feed item targets a criterion.
class FeedItemValidationErrorEnum(object):
    # NOTE(review): auto-generated wrapper around a large proto enum; the 91
    # integer values are part of the API contract — never renumber, reorder,
    # or remove members by hand; regenerate from the source proto instead.

    class FeedItemValidationError(enum.IntEnum):
        """
        The possible validation errors of a feed item.

        Attributes:
            UNSPECIFIED (int): No value has been specified.
            UNKNOWN (int): Used for return value only. Represents value unknown in this version.
            STRING_TOO_SHORT (int): String is too short.
            STRING_TOO_LONG (int): String is too long.
            VALUE_NOT_SPECIFIED (int): Value is not provided.
            INVALID_DOMESTIC_PHONE_NUMBER_FORMAT (int): Phone number format is invalid for region.
            INVALID_PHONE_NUMBER (int): String does not represent a phone number.
            PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY (int): Phone number format is not compatible with country code.
            PREMIUM_RATE_NUMBER_NOT_ALLOWED (int): Premium rate number is not allowed.
            DISALLOWED_NUMBER_TYPE (int): Phone number type is not allowed.
            VALUE_OUT_OF_RANGE (int): Specified value is outside of the valid range.
            CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY (int): Call tracking is not supported in the selected country.
            CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING (int): Customer is not whitelisted for call tracking.
            INVALID_COUNTRY_CODE (int): Country code is invalid.
            INVALID_APP_ID (int): The specified mobile app id is invalid.
            MISSING_ATTRIBUTES_FOR_FIELDS (int): Some required field attributes are missing.
            INVALID_TYPE_ID (int): Invalid email button type for email extension.
            INVALID_EMAIL_ADDRESS (int): Email address is invalid.
            INVALID_HTTPS_URL (int): The HTTPS URL in email extension is invalid.
            MISSING_DELIVERY_ADDRESS (int): Delivery address is missing from email extension.
            START_DATE_AFTER_END_DATE (int): FeedItem scheduling start date comes after end date.
            MISSING_FEED_ITEM_START_TIME (int): FeedItem scheduling start time is missing.
            MISSING_FEED_ITEM_END_TIME (int): FeedItem scheduling end time is missing.
            MISSING_FEED_ITEM_ID (int): Cannot compute system attributes on a FeedItem that has no FeedItemId.
            VANITY_PHONE_NUMBER_NOT_ALLOWED (int): Call extension vanity phone numbers are not supported.
            INVALID_REVIEW_EXTENSION_SNIPPET (int): Invalid review text.
            INVALID_NUMBER_FORMAT (int): Invalid format for numeric value in ad parameter.
            INVALID_DATE_FORMAT (int): Invalid format for date value in ad parameter.
            INVALID_PRICE_FORMAT (int): Invalid format for price value in ad parameter.
            UNKNOWN_PLACEHOLDER_FIELD (int): Unrecognized type given for value in ad parameter.
            MISSING_ENHANCED_SITELINK_DESCRIPTION_LINE (int): Enhanced sitelinks must have both description lines specified.
            REVIEW_EXTENSION_SOURCE_INELIGIBLE (int): Review source is ineligible.
            HYPHENS_IN_REVIEW_EXTENSION_SNIPPET (int): Review text cannot contain hyphens or dashes.
            DOUBLE_QUOTES_IN_REVIEW_EXTENSION_SNIPPET (int): Review text cannot contain double quote characters.
            QUOTES_IN_REVIEW_EXTENSION_SNIPPET (int): Review text cannot contain quote characters.
            INVALID_FORM_ENCODED_PARAMS (int): Parameters are encoded in the wrong format.
            INVALID_URL_PARAMETER_NAME (int): URL parameter name must contain only letters, numbers, underscores, and
                dashes.
            NO_GEOCODING_RESULT (int): Cannot find address location.
            SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT (int): Review extension text has source name.
            CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED (int): Some phone numbers can be shorter than usual. Some of these short numbers
                are carrier-specific, and we disallow those in ad extensions because they
                will not be available to all users.
            INVALID_PLACEHOLDER_FIELD_ID (int): Triggered when a request references a placeholder field id that does not
                exist.
            INVALID_URL_TAG (int): URL contains invalid ValueTrack tags or format.
            LIST_TOO_LONG (int): Provided list exceeds acceptable size.
            INVALID_ATTRIBUTES_COMBINATION (int): Certain combinations of attributes aren't allowed to be specified in the
                same feed item.
            DUPLICATE_VALUES (int): An attribute has the same value repeatedly.
            INVALID_CALL_CONVERSION_ACTION_ID (int): Advertisers can link a conversion action with a phone number to indicate
                that sufficiently long calls forwarded to that phone number should be
                counted as conversions of the specified type. This is an error message
                indicating that the conversion action specified is invalid (e.g., the
                conversion action does not exist within the appropriate Google Ads
                account, or it is a type of conversion not appropriate to phone call
                conversions).
            CANNOT_SET_WITHOUT_FINAL_URLS (int): Tracking template requires final url to be set.
            APP_ID_DOESNT_EXIST_IN_APP_STORE (int): An app id was provided that doesn't exist in the given app store.
            INVALID_FINAL_URL (int): Invalid U2 final url.
            INVALID_TRACKING_URL (int): Invalid U2 tracking url.
            INVALID_FINAL_URL_FOR_APP_DOWNLOAD_URL (int): Final URL should start from App download URL.
            LIST_TOO_SHORT (int): List provided is too short.
            INVALID_USER_ACTION (int): User Action field has invalid value.
            INVALID_TYPE_NAME (int): Type field has invalid value.
            INVALID_EVENT_CHANGE_STATUS (int): Change status for event is invalid.
            INVALID_SNIPPETS_HEADER (int): The header of a structured snippets extension is not one of the valid
                headers.
            INVALID_ANDROID_APP_LINK (int): Android app link is not formatted correctly
            NUMBER_TYPE_WITH_CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY (int): Phone number incompatible with call tracking for country.
            RESERVED_KEYWORD_OTHER (int): The input is identical to a reserved keyword
            DUPLICATE_OPTION_LABELS (int): Each option label in the message extension must be unique.
            DUPLICATE_OPTION_PREFILLS (int): Each option prefill in the message extension must be unique.
            UNEQUAL_LIST_LENGTHS (int): In message extensions, the number of optional labels and optional
                prefills must be the same.
            INCONSISTENT_CURRENCY_CODES (int): All currency codes in an ad extension must be the same.
            PRICE_EXTENSION_HAS_DUPLICATED_HEADERS (int): Headers in price extension are not unique.
            ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION (int): Header and description in an item are the same.
            PRICE_EXTENSION_HAS_TOO_FEW_ITEMS (int): Price extension has too few items.
            UNSUPPORTED_VALUE (int): The given value is not supported.
            INVALID_FINAL_MOBILE_URL (int): Invalid final mobile url.
            INVALID_KEYWORDLESS_AD_RULE_LABEL (int): The given string value of Label contains invalid characters
            VALUE_TRACK_PARAMETER_NOT_SUPPORTED (int): The given URL contains value track parameters.
            UNSUPPORTED_VALUE_IN_SELECTED_LANGUAGE (int): The given value is not supported in the selected language of an
                extension.
            INVALID_IOS_APP_LINK (int): The iOS app link is not formatted correctly.
            MISSING_IOS_APP_LINK_OR_IOS_APP_STORE_ID (int): iOS app link or iOS app store id is missing.
            PROMOTION_INVALID_TIME (int): Promotion time is invalid.
            PROMOTION_CANNOT_SET_PERCENT_OFF_AND_MONEY_AMOUNT_OFF (int): Both the percent off and money amount off fields are set.
            PROMOTION_CANNOT_SET_PROMOTION_CODE_AND_ORDERS_OVER_AMOUNT (int): Both the promotion code and orders over amount fields are set.
            TOO_MANY_DECIMAL_PLACES_SPECIFIED (int): Too many decimal places are specified.
            AD_CUSTOMIZERS_NOT_ALLOWED (int): Ad Customizers are present and not allowed.
            INVALID_LANGUAGE_CODE (int): Language code is not valid.
            UNSUPPORTED_LANGUAGE (int): Language is not supported.
            IF_FUNCTION_NOT_ALLOWED (int): IF Function is present and not allowed.
            INVALID_FINAL_URL_SUFFIX (int): Final url suffix is not valid.
            INVALID_TAG_IN_FINAL_URL_SUFFIX (int): Final url suffix contains an invalid tag.
            INVALID_FINAL_URL_SUFFIX_FORMAT (int): Final url suffix is formatted incorrectly.
            CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED (int): Consent for call recording, which is required for the use of call
                extensions, was not provided by the advertiser.
            ONLY_ONE_DELIVERY_OPTION_IS_ALLOWED (int): Multiple message delivery options are set.
            NO_DELIVERY_OPTION_IS_SET (int): No message delivery option is set.
            INVALID_CONVERSION_REPORTING_STATE (int): String value of conversion reporting state field is not valid.
            IMAGE_SIZE_WRONG (int): Image size is not right.
            EMAIL_DELIVERY_NOT_AVAILABLE_IN_COUNTRY (int): Email delivery is not supported in the country specified in the country
                code field.
            AUTO_REPLY_NOT_AVAILABLE_IN_COUNTRY (int): Auto reply is not supported in the country specified in the country code
                field.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        STRING_TOO_SHORT = 2
        STRING_TOO_LONG = 3
        VALUE_NOT_SPECIFIED = 4
        INVALID_DOMESTIC_PHONE_NUMBER_FORMAT = 5
        INVALID_PHONE_NUMBER = 6
        PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY = 7
        PREMIUM_RATE_NUMBER_NOT_ALLOWED = 8
        DISALLOWED_NUMBER_TYPE = 9
        VALUE_OUT_OF_RANGE = 10
        CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 11
        CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING = 12
        INVALID_COUNTRY_CODE = 13
        INVALID_APP_ID = 14
        MISSING_ATTRIBUTES_FOR_FIELDS = 15
        INVALID_TYPE_ID = 16
        INVALID_EMAIL_ADDRESS = 17
        INVALID_HTTPS_URL = 18
        MISSING_DELIVERY_ADDRESS = 19
        START_DATE_AFTER_END_DATE = 20
        MISSING_FEED_ITEM_START_TIME = 21
        MISSING_FEED_ITEM_END_TIME = 22
        MISSING_FEED_ITEM_ID = 23
        VANITY_PHONE_NUMBER_NOT_ALLOWED = 24
        INVALID_REVIEW_EXTENSION_SNIPPET = 25
        INVALID_NUMBER_FORMAT = 26
        INVALID_DATE_FORMAT = 27
        INVALID_PRICE_FORMAT = 28
        UNKNOWN_PLACEHOLDER_FIELD = 29
        MISSING_ENHANCED_SITELINK_DESCRIPTION_LINE = 30
        REVIEW_EXTENSION_SOURCE_INELIGIBLE = 31
        HYPHENS_IN_REVIEW_EXTENSION_SNIPPET = 32
        DOUBLE_QUOTES_IN_REVIEW_EXTENSION_SNIPPET = 33
        QUOTES_IN_REVIEW_EXTENSION_SNIPPET = 34
        INVALID_FORM_ENCODED_PARAMS = 35
        INVALID_URL_PARAMETER_NAME = 36
        NO_GEOCODING_RESULT = 37
        SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT = 38
        CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED = 39
        INVALID_PLACEHOLDER_FIELD_ID = 40
        INVALID_URL_TAG = 41
        LIST_TOO_LONG = 42
        INVALID_ATTRIBUTES_COMBINATION = 43
        DUPLICATE_VALUES = 44
        INVALID_CALL_CONVERSION_ACTION_ID = 45
        CANNOT_SET_WITHOUT_FINAL_URLS = 46
        APP_ID_DOESNT_EXIST_IN_APP_STORE = 47
        INVALID_FINAL_URL = 48
        INVALID_TRACKING_URL = 49
        INVALID_FINAL_URL_FOR_APP_DOWNLOAD_URL = 50
        LIST_TOO_SHORT = 51
        INVALID_USER_ACTION = 52
        INVALID_TYPE_NAME = 53
        INVALID_EVENT_CHANGE_STATUS = 54
        INVALID_SNIPPETS_HEADER = 55
        INVALID_ANDROID_APP_LINK = 56
        NUMBER_TYPE_WITH_CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 57
        RESERVED_KEYWORD_OTHER = 58
        DUPLICATE_OPTION_LABELS = 59
        DUPLICATE_OPTION_PREFILLS = 60
        UNEQUAL_LIST_LENGTHS = 61
        INCONSISTENT_CURRENCY_CODES = 62
        PRICE_EXTENSION_HAS_DUPLICATED_HEADERS = 63
        ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION = 64
        PRICE_EXTENSION_HAS_TOO_FEW_ITEMS = 65
        UNSUPPORTED_VALUE = 66
        INVALID_FINAL_MOBILE_URL = 67
        INVALID_KEYWORDLESS_AD_RULE_LABEL = 68
        VALUE_TRACK_PARAMETER_NOT_SUPPORTED = 69
        UNSUPPORTED_VALUE_IN_SELECTED_LANGUAGE = 70
        INVALID_IOS_APP_LINK = 71
        MISSING_IOS_APP_LINK_OR_IOS_APP_STORE_ID = 72
        PROMOTION_INVALID_TIME = 73
        PROMOTION_CANNOT_SET_PERCENT_OFF_AND_MONEY_AMOUNT_OFF = 74
        PROMOTION_CANNOT_SET_PROMOTION_CODE_AND_ORDERS_OVER_AMOUNT = 75
        TOO_MANY_DECIMAL_PLACES_SPECIFIED = 76
        AD_CUSTOMIZERS_NOT_ALLOWED = 77
        INVALID_LANGUAGE_CODE = 78
        UNSUPPORTED_LANGUAGE = 79
        IF_FUNCTION_NOT_ALLOWED = 80
        INVALID_FINAL_URL_SUFFIX = 81
        INVALID_TAG_IN_FINAL_URL_SUFFIX = 82
        INVALID_FINAL_URL_SUFFIX_FORMAT = 83
        CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED = 84
        ONLY_ONE_DELIVERY_OPTION_IS_ALLOWED = 85
        NO_DELIVERY_OPTION_IS_SET = 86
        INVALID_CONVERSION_REPORTING_STATE = 87
        IMAGE_SIZE_WRONG = 88
        EMAIL_DELIVERY_NOT_AVAILABLE_IN_COUNTRY = 89
        AUTO_REPLY_NOT_AVAILABLE_IN_COUNTRY = 90
class FeedItemValidationStatusEnum(object):

    class FeedItemValidationStatus(enum.IntEnum):
        """The possible validation statuses of a feed item."""
        UNSPECIFIED = 0  # No value has been specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        PENDING = 2  # Validation pending.
        INVALID = 3  # An error was found.
        VALID = 4  # Feed item is semantically well-formed.
class FeedLinkStatusEnum(object):

    class FeedLinkStatus(enum.IntEnum):
        """Possible statuses of a feed link."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        ENABLED = 2  # Feed link is enabled.
        REMOVED = 3  # Feed link has been removed.
class FeedMappingCriterionTypeEnum(object):

    class FeedMappingCriterionType(enum.IntEnum):
        """Possible placeholder types for a feed mapping."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        # NOTE: the next two values are intentionally non-sequential (4 before
        # 3); they mirror the upstream definition and must not be renumbered.
        LOCATION_EXTENSION_TARGETING = 4  # Campaign targeting at locations within a location feed.
        DSA_PAGE_FEED = 3  # Url targeting for dynamic search ads within a page feed.
class FeedMappingErrorEnum(object):
    # NOTE(review): auto-generated enum wrapper. Value 6 is unassigned —
    # presumably reserved/removed in the upstream proto; confirm before reuse.

    class FeedMappingError(enum.IntEnum):
        """
        Enum describing possible feed item errors.

        Attributes:
            UNSPECIFIED (int): Enum unspecified.
            UNKNOWN (int): The received error code is not known in this version.
            INVALID_PLACEHOLDER_FIELD (int): The given placeholder field does not exist.
            INVALID_CRITERION_FIELD (int): The given criterion field does not exist.
            INVALID_PLACEHOLDER_TYPE (int): The given placeholder type does not exist.
            INVALID_CRITERION_TYPE (int): The given criterion type does not exist.
            NO_ATTRIBUTE_FIELD_MAPPINGS (int): A feed mapping must contain at least one attribute field mapping.
            FEED_ATTRIBUTE_TYPE_MISMATCH (int): The type of the feed attribute referenced in the attribute field mapping
                must match the type of the placeholder field.
            CANNOT_OPERATE_ON_MAPPINGS_FOR_SYSTEM_GENERATED_FEED (int): A feed mapping for a system generated feed cannot be operated on.
            MULTIPLE_MAPPINGS_FOR_PLACEHOLDER_TYPE (int): Only one feed mapping for a placeholder type is allowed per feed or
                customer (depending on the placeholder type).
            MULTIPLE_MAPPINGS_FOR_CRITERION_TYPE (int): Only one feed mapping for a criterion type is allowed per customer.
            MULTIPLE_MAPPINGS_FOR_PLACEHOLDER_FIELD (int): Only one feed attribute mapping for a placeholder field is allowed
                (depending on the placeholder type).
            MULTIPLE_MAPPINGS_FOR_CRITERION_FIELD (int): Only one feed attribute mapping for a criterion field is allowed
                (depending on the criterion type).
            UNEXPECTED_ATTRIBUTE_FIELD_MAPPINGS (int): This feed mapping may not contain any explicit attribute field mappings.
            LOCATION_PLACEHOLDER_ONLY_FOR_PLACES_FEEDS (int): Location placeholder feed mappings can only be created for Places feeds.
            CANNOT_MODIFY_MAPPINGS_FOR_TYPED_FEED (int): Mappings for typed feeds cannot be modified.
            INVALID_PLACEHOLDER_TYPE_FOR_NON_SYSTEM_GENERATED_FEED (int): The given placeholder type can only be mapped to system generated feeds.
            INVALID_PLACEHOLDER_TYPE_FOR_SYSTEM_GENERATED_FEED_TYPE (int): The given placeholder type cannot be mapped to a system generated feed
                with the given type.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        INVALID_PLACEHOLDER_FIELD = 2
        INVALID_CRITERION_FIELD = 3
        INVALID_PLACEHOLDER_TYPE = 4
        INVALID_CRITERION_TYPE = 5
        # Value 6 intentionally absent (see class-level note).
        NO_ATTRIBUTE_FIELD_MAPPINGS = 7
        FEED_ATTRIBUTE_TYPE_MISMATCH = 8
        CANNOT_OPERATE_ON_MAPPINGS_FOR_SYSTEM_GENERATED_FEED = 9
        MULTIPLE_MAPPINGS_FOR_PLACEHOLDER_TYPE = 10
        MULTIPLE_MAPPINGS_FOR_CRITERION_TYPE = 11
        MULTIPLE_MAPPINGS_FOR_PLACEHOLDER_FIELD = 12
        MULTIPLE_MAPPINGS_FOR_CRITERION_FIELD = 13
        UNEXPECTED_ATTRIBUTE_FIELD_MAPPINGS = 14
        LOCATION_PLACEHOLDER_ONLY_FOR_PLACES_FEEDS = 15
        CANNOT_MODIFY_MAPPINGS_FOR_TYPED_FEED = 16
        INVALID_PLACEHOLDER_TYPE_FOR_NON_SYSTEM_GENERATED_FEED = 17
        INVALID_PLACEHOLDER_TYPE_FOR_SYSTEM_GENERATED_FEED_TYPE = 18
class FeedMappingStatusEnum(object):

    class FeedMappingStatus(enum.IntEnum):
        """Possible statuses of a feed mapping."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        ENABLED = 2  # Feed mapping is enabled.
        REMOVED = 3  # Feed mapping has been removed.
class FeedOriginEnum(object):

    class FeedOrigin(enum.IntEnum):
        """Possible values for a feed origin."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        # The FeedAttributes for this Feed are managed by the user. Users can
        # add FeedAttributes to this Feed.
        USER = 2
        # The FeedAttributes for a GOOGLE Feed are created by Google. A feed
        # of this type is maintained by Google and will have the correct
        # attributes for the placeholder type of the feed.
        GOOGLE = 3
class FeedStatusEnum(object):

    class FeedStatus(enum.IntEnum):
        """Possible statuses of a feed."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Return value only; value unknown in this version.
        ENABLED = 2  # Feed is enabled.
        REMOVED = 3  # Feed has been removed.
class FieldErrorEnum(object):

    class FieldError(enum.IntEnum):
        """Enum describing possible field errors."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        REQUIRED = 2  # The required field was not present in the resource.
        IMMUTABLE_FIELD = 3  # The field attempted to be mutated is immutable.
        INVALID_VALUE = 4  # The field's value is invalid.
        VALUE_MUST_BE_UNSET = 5  # The field cannot be set.
        REQUIRED_NONEMPTY_LIST = 6  # The required repeated field was empty.
        FIELD_CANNOT_BE_CLEARED = 7  # The field cannot be cleared.
        BLACKLISTED_VALUE = 8  # The field's value is on a blacklist for this field.
class FieldMaskErrorEnum(object):

    class FieldMaskError(enum.IntEnum):
        """Enum describing possible field mask errors."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1  # The received error code is not known in this version.
        # NOTE: the remaining values are intentionally non-sequential; they
        # mirror the upstream definition and must not be renumbered.
        FIELD_MASK_MISSING = 5  # The field mask must be provided for update operations.
        FIELD_MASK_NOT_ALLOWED = 4  # The field mask must be empty for create and remove operations.
        FIELD_NOT_FOUND = 2  # The field mask contained an invalid field.
        # The field mask updated a field with subfields. Fields with subfields
        # may be cleared, but not updated. To fix this, the field mask should
        # select all the subfields of the invalid field.
        FIELD_HAS_SUBFIELDS = 3
class FlightPlaceholderFieldEnum(object):
    # NOTE(review): auto-generated enum wrapper; values appear to mirror the
    # upstream proto — do not renumber or reorder by hand.

    class FlightPlaceholderField(enum.IntEnum):
        """
        Possible values for Flight placeholder fields.

        Attributes:
            UNSPECIFIED (int): Not specified.
            UNKNOWN (int): Used for return value only. Represents value unknown in this version.
            DESTINATION_ID (int): Data Type: STRING. Required. Destination id. Example: PAR, LON.
                For feed items that only have destination id, destination id must be a
                unique key. For feed items that have both destination id and origin id,
                then the combination must be a unique key.
            ORIGIN_ID (int): Data Type: STRING. Origin id. Example: PAR, LON.
                Optional. Combination of destination id and origin id must be unique per
                offer.
            FLIGHT_DESCRIPTION (int): Data Type: STRING. Required. Main headline with product name to be shown
                in dynamic ad.
            ORIGIN_NAME (int): Data Type: STRING. Shorter names are recommended.
            DESTINATION_NAME (int): Data Type: STRING. Shorter names are recommended.
            FLIGHT_PRICE (int): Data Type: STRING. Price to be shown in the ad.
                Example: "100.00 USD"
            FORMATTED_PRICE (int): Data Type: STRING. Formatted price to be shown in the ad.
                Example: "Starting at $100.00 USD", "$80 - $100"
            FLIGHT_SALE_PRICE (int): Data Type: STRING. Sale price to be shown in the ad.
                Example: "80.00 USD"
            FORMATTED_SALE_PRICE (int): Data Type: STRING. Formatted sale price to be shown in the ad.
                Example: "On sale for $80.00", "$60 - $80"
            IMAGE_URL (int): Data Type: URL. Image to be displayed in the ad.
            FINAL_URLS (int): Data Type: URL\_LIST. Required. Final URLs for the ad when using
                Upgraded URLs. User will be redirected to these URLs when they click on
                an ad, or when they click on a specific flight for ads that show
                multiple flights.
            FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
                URLs.
            TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
            ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
                android-app://{package\_id}/{scheme}/{host\_path}. The components are
                defined as follows: package\_id: app ID as specified in Google Play.
                scheme: the scheme to pass to the application. Can be HTTP, or a custom
                scheme. host\_path: identifies the specific content within your
                application.
            SIMILAR_DESTINATION_IDS (int): Data Type: STRING\_LIST. List of recommended destination IDs to show
                together with this item.
            IOS_APP_LINK (int): Data Type: STRING. iOS app link.
            IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        DESTINATION_ID = 2
        ORIGIN_ID = 3
        FLIGHT_DESCRIPTION = 4
        ORIGIN_NAME = 5
        DESTINATION_NAME = 6
        FLIGHT_PRICE = 7
        FORMATTED_PRICE = 8
        FLIGHT_SALE_PRICE = 9
        FORMATTED_SALE_PRICE = 10
        IMAGE_URL = 11
        FINAL_URLS = 12
        FINAL_MOBILE_URLS = 13
        TRACKING_URL = 14
        ANDROID_APP_LINK = 15
        SIMILAR_DESTINATION_IDS = 16
        IOS_APP_LINK = 17
        IOS_APP_STORE_ID = 18
class FrequencyCapEventTypeEnum(object):
    """Container for an enum describing frequency cap event types."""

    class FrequencyCapEventType(enum.IntEnum):
        """The type of event that the cap applies to (e.g. impression)."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this API version.
        IMPRESSION = 2   # The cap applies on ad impressions.
        VIDEO_VIEW = 3   # The cap applies on video ad views.
class FrequencyCapLevelEnum(object):
    """Container for an enum describing frequency cap levels."""

    class FrequencyCapLevel(enum.IntEnum):
        """The level on which the cap is applied (e.g. ad group ad, ad group).

        The cap is applied to all the resources of this level.
        """
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this API version.
        AD_GROUP_AD = 2  # Cap applied at the ad group ad level.
        AD_GROUP = 3     # Cap applied at the ad group level.
        CAMPAIGN = 4     # Cap applied at the campaign level.
class FrequencyCapTimeUnitEnum(object):
    """Container for an enum describing frequency cap time units."""

    class FrequencyCapTimeUnit(enum.IntEnum):
        """Unit of time the cap is defined at (e.g. day, week)."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this API version.
        DAY = 2          # Limit defined per one day.
        WEEK = 3         # Limit defined per one week.
        MONTH = 4        # Limit defined per one month.
class FunctionErrorEnum(object):
    """Container for an enum describing possible function errors."""

    class FunctionError(enum.IntEnum):
        """Enum describing possible function errors."""
        UNSPECIFIED = 0                                   # Enum unspecified.
        UNKNOWN = 1                                       # Error code not known in this version.
        INVALID_FUNCTION_FORMAT = 2                       # Function format not recognized as supported.
        DATA_TYPE_MISMATCH = 3                            # Operand data types do not match.
        INVALID_CONJUNCTION_OPERANDS = 4                  # Operands cannot be combined in a conjunction.
        INVALID_NUMBER_OF_OPERANDS = 5                    # Invalid number of operands.
        INVALID_OPERAND_TYPE = 6                          # Operand type not supported.
        INVALID_OPERATOR = 7                              # Operator not supported.
        INVALID_REQUEST_CONTEXT_TYPE = 8                  # Request context type not supported.
        INVALID_FUNCTION_FOR_CALL_PLACEHOLDER = 9         # Matching function not allowed for call placeholders.
        INVALID_FUNCTION_FOR_PLACEHOLDER = 10             # Matching function not allowed for this placeholder.
        INVALID_OPERAND = 11                              # Invalid operand.
        MISSING_CONSTANT_OPERAND_VALUE = 12               # Missing value for the constant operand.
        INVALID_CONSTANT_OPERAND_VALUE = 13               # Constant operand value is invalid.
        INVALID_NESTING = 14                              # Invalid function nesting.
        MULTIPLE_FEED_IDS_NOT_SUPPORTED = 15              # Feed ID differed from another in the same function.
        INVALID_FUNCTION_FOR_FEED_WITH_FIXED_SCHEMA = 16  # Invalid for a feed with a fixed schema.
        INVALID_ATTRIBUTE_NAME = 17                       # Invalid attribute name.
class FunctionParsingErrorEnum(object):
    """Container for an enum describing possible function parsing errors."""

    class FunctionParsingError(enum.IntEnum):
        """Enum describing possible function parsing errors."""
        UNSPECIFIED = 0                                  # Enum unspecified.
        UNKNOWN = 1                                      # Error code not known in this version.
        NO_MORE_INPUT = 2                                # Unexpected end of function string.
        EXPECTED_CHARACTER = 3                           # Could not find an expected character.
        UNEXPECTED_SEPARATOR = 4                         # Unexpected separator character.
        UNMATCHED_LEFT_BRACKET = 5                       # Unmatched left bracket or parenthesis.
        UNMATCHED_RIGHT_BRACKET = 6                      # Unmatched right bracket or parenthesis.
        TOO_MANY_NESTED_FUNCTIONS = 7                    # Functions nested too deeply.
        MISSING_RIGHT_HAND_OPERAND = 8                   # Missing right-hand-side operand.
        INVALID_OPERATOR_NAME = 9                        # Invalid operator/function name.
        FEED_ATTRIBUTE_OPERAND_ARGUMENT_NOT_INTEGER = 10 # Feed attribute operand's argument not an integer.
        NO_OPERANDS = 11                                 # Missing function operands.
        TOO_MANY_OPERANDS = 12                           # Function had too many operands.
class GenderTypeEnum(object):
    """Container for an enum describing demographic gender types."""

    class GenderType(enum.IntEnum):
        """The type of demographic genders (e.g. female)."""
        UNSPECIFIED = 0   # Not specified.
        UNKNOWN = 1       # Response-only: value unknown in this API version.
        # NOTE: values below are non-sequential; they mirror the proto definition.
        MALE = 10         # Male.
        FEMALE = 11       # Female.
        UNDETERMINED = 20 # Undetermined gender.
class GeoTargetConstantStatusEnum(object):
    """Container for an enum describing geo target constant statuses."""

    class GeoTargetConstantStatus(enum.IntEnum):
        """The possible statuses of a geo target constant."""
        UNSPECIFIED = 0      # No value has been specified.
        UNKNOWN = 1          # Response-only: value not known in this version.
        ENABLED = 2          # The geo target constant is valid.
        REMOVAL_PLANNED = 3  # Obsolete; will be removed.
class GeoTargetConstantSuggestionErrorEnum(object):
    """Container for an enum describing geo target constant suggestion errors."""

    class GeoTargetConstantSuggestionError(enum.IntEnum):
        """Enum describing possible geo target constant suggestion errors."""
        UNSPECIFIED = 0              # Enum unspecified.
        UNKNOWN = 1                  # Error code not known in this version.
        LOCATION_NAME_SIZE_LIMIT = 2 # A location name cannot exceed 300 characters.
        LOCATION_NAME_LIMIT = 3      # At most 25 location names per SuggestGeoTargetConstants call.
        INVALID_COUNTRY_CODE = 4     # The country code is invalid.
        REQUEST_PARAMETERS_UNSET = 5 # Resource names or location names must be provided.
class GeoTargetingRestrictionEnum(object):
    """Container for an enum describing geo targeting restrictions."""

    class GeoTargetingRestriction(enum.IntEnum):
        """A restriction used to determine if the request context's geo
        should be matched."""
        UNSPECIFIED = 0          # Not specified.
        UNKNOWN = 1              # Response-only: value unknown in this API version.
        LOCATION_OF_PRESENCE = 2 # Match the physical location of the user.
class GeoTargetingTypeEnum(object):
    """Container for an enum describing possible geo targeting types."""

    class GeoTargetingType(enum.IntEnum):
        """The possible geo targeting types."""
        UNSPECIFIED = 0          # Not specified.
        UNKNOWN = 1              # Value unknown in this version.
        AREA_OF_INTEREST = 2     # Location the user is interested in while querying.
        LOCATION_OF_PRESENCE = 3 # Location of the user issuing the query.
class GoogleAdsFieldCategoryEnum(object):
    """Container for an enum describing the category of an artifact."""

    class GoogleAdsFieldCategory(enum.IntEnum):
        """The category of the artifact."""
        UNSPECIFIED = 0 # Unspecified.
        UNKNOWN = 1     # Unknown.
        RESOURCE = 2    # The described artifact is a resource.
        ATTRIBUTE = 3   # A field that is an attribute of a resource; may
                        # segment queries when attributed to a different
                        # resource than the FROM clause.
        # NOTE: value 4 is unused; the gap mirrors the proto definition.
        SEGMENT = 5     # A field that always segments search queries.
        METRIC = 6      # A metric field; never segments search queries.
class GoogleAdsFieldDataTypeEnum(object):
    """Container for an enum of GoogleAdsService artifact data types."""

    class GoogleAdsFieldDataType(enum.IntEnum):
        """The various types a GoogleAdsService artifact may take on."""
        UNSPECIFIED = 0   # Unspecified.
        UNKNOWN = 1       # Unknown.
        BOOLEAN = 2       # google.protobuf.BoolValue; operators: =, !=
        DATE = 3          # google.protobuf.StringValue with date operators:
                          # =, <, >, <=, >=, BETWEEN, DURING, IN
        DOUBLE = 4        # google.protobuf.DoubleValue; =, !=, <, >, IN, NOT IN
        ENUM = 5          # Enum (definition at type_url); =, !=, IN, NOT IN
        FLOAT = 6         # google.protobuf.FloatValue; =, !=, <, >, IN, NOT IN
        INT32 = 7         # google.protobuf.Int32Value; =, !=, <, >, <=, >=, BETWEEN, IN, NOT IN
        INT64 = 8         # google.protobuf.Int64Value; =, !=, <, >, <=, >=, BETWEEN, IN, NOT IN
        MESSAGE = 9       # Protobuf message (details in type_url); no operators.
        RESOURCE_NAME = 10 # Resource name / foreign key string; no operators.
        STRING = 11       # google.protobuf.StringValue; =, !=, LIKE, NOT LIKE, IN, NOT IN
class HeaderErrorEnum(object):
    """Container for an enum describing possible header errors."""

    class HeaderError(enum.IntEnum):
        """Enum describing possible header errors."""
        UNSPECIFIED = 0 # Enum unspecified.
        UNKNOWN = 1     # Error code not known in this version.
        # NOTE: value 2 is unused; the gap mirrors the proto definition.
        INVALID_LOGIN_CUSTOMER_ID = 3 # The login customer id could not be validated.
class HotelDateSelectionTypeEnum(object):
    """Container for an enum describing hotel date selection types."""

    class HotelDateSelectionType(enum.IntEnum):
        """Enum describing possible hotel date selection types."""
        UNSPECIFIED = 0 # Not specified.
        UNKNOWN = 1     # Response-only: value unknown in this API version.
        # NOTE: values below are non-sequential; they mirror the proto definition.
        DEFAULT_SELECTION = 50 # Dates selected by default.
        USER_SELECTED = 51     # Dates selected by the user.
class HotelPlaceholderFieldEnum(object):
    """Container for an enum of Hotel placeholder fields."""

    class HotelPlaceholderField(enum.IntEnum):
        """Possible values for Hotel placeholder fields.

        Unless noted, each member documents the data type expected for the
        corresponding feed attribute.
        """
        UNSPECIFIED = 0         # Not specified.
        UNKNOWN = 1             # Response-only: value unknown in this API version.
        PROPERTY_ID = 2         # STRING. Required. Unique ID.
        PROPERTY_NAME = 3       # STRING. Required. Main headline with property name.
        DESTINATION_NAME = 4    # STRING. Name of destination shown in dynamic ad.
        DESCRIPTION = 5         # STRING. Description of destination.
        ADDRESS = 6             # STRING. Complete property address, incl. postal code.
        PRICE = 7               # STRING. Price shown in the ad, e.g. "100.00 USD".
        FORMATTED_PRICE = 8     # STRING. Formatted price, e.g. "Starting at $100.00 USD".
        SALE_PRICE = 9          # STRING. Sale price, e.g. "80.00 USD".
        FORMATTED_SALE_PRICE = 10 # STRING. Formatted sale price, e.g. "On sale for $80.00".
        IMAGE_URL = 11          # URL. Image displayed in the ad.
        CATEGORY = 12           # STRING. Category used to group like items.
        STAR_RATING = 13        # INT64. Star rating (1 to 5).
        CONTEXTUAL_KEYWORDS = 14 # STRING_LIST. Keywords for product retrieval.
        FINAL_URLS = 15         # URL_LIST. Required. Final URLs with Upgraded URLs.
        FINAL_MOBILE_URLS = 16  # URL_LIST. Final mobile URLs with Upgraded URLs.
        TRACKING_URL = 17       # URL. Tracking template with Upgraded URLs.
        ANDROID_APP_LINK = 18   # STRING. android-app://{package_id}/{scheme}/{host_path}.
        SIMILAR_PROPERTY_IDS = 19 # STRING_LIST. Recommended property IDs to co-show.
        IOS_APP_LINK = 20       # STRING. iOS app link.
        IOS_APP_STORE_ID = 21   # INT64. iOS app store ID.
class IdErrorEnum(object):
    """Container for an enum describing possible id errors."""

    class IdError(enum.IntEnum):
        """Enum describing possible id errors."""
        UNSPECIFIED = 0 # Enum unspecified.
        UNKNOWN = 1     # Error code not known in this version.
        NOT_FOUND = 2   # Id not found.
class ImageErrorEnum(object):
    """Container for an enum describing possible image errors."""

    class ImageError(enum.IntEnum):
        """Enum describing possible image errors."""
        UNSPECIFIED = 0                          # Enum unspecified.
        UNKNOWN = 1                              # Error code not known in this version.
        INVALID_IMAGE = 2                        # The image is not valid.
        STORAGE_ERROR = 3                        # The image could not be stored.
        BAD_REQUEST = 4                          # Problem with the request.
        UNEXPECTED_SIZE = 5                      # Image not of legal dimensions.
        ANIMATED_NOT_ALLOWED = 6                 # Animated images not permitted.
        ANIMATION_TOO_LONG = 7                   # Animation is too long.
        SERVER_ERROR = 8                         # Error on the server.
        CMYK_JPEG_NOT_ALLOWED = 9                # CMYK color format not allowed.
        FLASH_NOT_ALLOWED = 10                   # Flash images not permitted.
        FLASH_WITHOUT_CLICKTAG = 11              # Flash images must support clickTag.
        FLASH_ERROR_AFTER_FIXING_CLICK_TAG = 12  # Flash error after fixing click tag.
        ANIMATED_VISUAL_EFFECT = 13              # Unacceptable visual effects.
        FLASH_ERROR = 14                         # Problem with the flash image.
        LAYOUT_PROBLEM = 15                      # Incorrect image layout.
        PROBLEM_READING_IMAGE_FILE = 16          # Problem reading the image file.
        ERROR_STORING_IMAGE = 17                 # Error storing the image.
        ASPECT_RATIO_NOT_ALLOWED = 18            # Aspect ratio not allowed.
        FLASH_HAS_NETWORK_OBJECTS = 19           # Flash cannot have network objects.
        FLASH_HAS_NETWORK_METHODS = 20           # Flash cannot have network methods.
        FLASH_HAS_URL = 21                       # Flash cannot have a Url.
        FLASH_HAS_MOUSE_TRACKING = 22            # Flash cannot use mouse tracking.
        FLASH_HAS_RANDOM_NUM = 23                # Flash cannot have a random number.
        FLASH_SELF_TARGETS = 24                  # Ad click target cannot be '_self'.
        FLASH_BAD_GETURL_TARGET = 25             # GetUrl method should only use '_blank'.
        FLASH_VERSION_NOT_SUPPORTED = 26         # Flash version not supported.
        FLASH_WITHOUT_HARD_CODED_CLICK_URL = 27  # Needs hard coded click URL or clickTAG.
        INVALID_FLASH_FILE = 28                  # Uploaded flash file is corrupted.
        FAILED_TO_FIX_CLICK_TAG_IN_FLASH = 29    # Click tag cannot be fixed properly.
        FLASH_ACCESSES_NETWORK_RESOURCES = 30    # Flash accesses network resources.
        FLASH_EXTERNAL_JS_CALL = 31              # Flash calls external javascript code.
        FLASH_EXTERNAL_FS_CALL = 32              # Flash calls flash system commands.
        FILE_TOO_LARGE = 33                      # Image file is too large.
        IMAGE_DATA_TOO_LARGE = 34                # Image data is too large.
        IMAGE_PROCESSING_ERROR = 35              # Error while processing the image.
        IMAGE_TOO_SMALL = 36                     # Image is too small.
        INVALID_INPUT = 37                       # Input was invalid.
        PROBLEM_READING_FILE = 38                # Problem reading the image file.
class IncomeRangeTypeEnum(object):
    """Container for an enum describing demographic income ranges."""

    class IncomeRangeType(enum.IntEnum):
        """The type of demographic income ranges (e.g. between 0% to 50%)."""
        UNSPECIFIED = 0 # Not specified.
        UNKNOWN = 1     # Response-only: value unknown in this API version.
        # NOTE: the 51000x values mirror the proto definition.
        INCOME_RANGE_0_50 = 510001        # 0%-50%.
        INCOME_RANGE_50_60 = 510002       # 50% to 60%.
        INCOME_RANGE_60_70 = 510003       # 60% to 70%.
        INCOME_RANGE_70_80 = 510004       # 70% to 80%.
        INCOME_RANGE_80_90 = 510005       # 80% to 90%.
        INCOME_RANGE_90_UP = 510006       # Greater than 90%.
        INCOME_RANGE_UNDETERMINED = 510000 # Undetermined income range.
class InteractionEventTypeEnum(object):
    """Container for an enum describing payable and free interaction types."""

    class InteractionEventType(enum.IntEnum):
        """Enum describing possible types of payable and free interactions."""
        UNSPECIFIED = 0 # Not specified.
        UNKNOWN = 1     # Response-only: value unknown in this API version.
        CLICK = 2       # Click to site (usually the advertiser's landing
                        # page); default type for click events.
        ENGAGEMENT = 3  # User's expressed intent to engage with the ad in-place.
        VIDEO_VIEW = 4  # User viewed a video ad.
        NONE = 5        # Default type for ad conversion events; used when the
                        # free interactions are NOT promoted into core metrics.
class InteractionTypeEnum(object):
    """Container for an enum describing possible interaction types."""

    class InteractionType(enum.IntEnum):
        """Enum describing possible interaction types."""
        UNSPECIFIED = 0 # Not specified.
        UNKNOWN = 1     # Response-only: value unknown in this API version.
        CALLS = 8000    # Calls. Value mirrors the proto definition.
class InternalErrorEnum(object):
    """Container for an enum describing possible internal errors."""

    class InternalError(enum.IntEnum):
        """Enum describing possible internal errors."""
        UNSPECIFIED = 0              # Enum unspecified.
        UNKNOWN = 1                  # Error code not known in this version.
        INTERNAL_ERROR = 2           # Unexpected internal error in the API.
        ERROR_CODE_NOT_PUBLISHED = 3 # Intended error code missing from all API
                                     # versions; a new code will be added.
        TRANSIENT_ERROR = 4          # Unexpected transient error; the request
                                     # should be retried.
class JobPlaceholderFieldEnum(object):
    """Container for an enum of Job placeholder fields."""

    class JobPlaceholderField(enum.IntEnum):
        """Possible values for Job placeholder fields.

        Unless noted, each member documents the data type expected for the
        corresponding feed attribute.
        """
        UNSPECIFIED = 0          # Not specified.
        UNKNOWN = 1              # Response-only: value unknown in this API version.
        JOB_ID = 2               # STRING. Required. Must be unique alone, or the
                                 # (JOB_ID, LOCATION_ID) pair must be unique.
        LOCATION_ID = 3          # STRING. (JOB_ID, LOCATION_ID) unique per offer.
        TITLE = 4                # STRING. Required. Main headline with job title.
        SUBTITLE = 5             # STRING. Job subtitle.
        DESCRIPTION = 6          # STRING. Description of job.
        IMAGE_URL = 7            # URL. Image in the ad; recommended for image ads.
        CATEGORY = 8             # STRING. Category used to group like items.
        CONTEXTUAL_KEYWORDS = 9  # STRING_LIST. Keywords for product retrieval.
        ADDRESS = 10             # STRING. Complete property address, incl. postal code.
        SALARY = 11              # STRING. Salary or salary range of job.
        FINAL_URLS = 12          # URL_LIST. Required. Final URLs with Upgraded URLs.
        # NOTE: value 13 is unused; the gap mirrors the proto definition.
        FINAL_MOBILE_URLS = 14   # URL_LIST. Final mobile URLs with Upgraded URLs.
        TRACKING_URL = 15        # URL. Tracking template with Upgraded URLs.
        ANDROID_APP_LINK = 16    # STRING. android-app://{package_id}/{scheme}/{host_path}.
        SIMILAR_JOB_IDS = 17     # STRING_LIST. Recommended job IDs to co-show.
        IOS_APP_LINK = 18        # STRING. iOS app link.
        IOS_APP_STORE_ID = 19    # INT64. iOS app store ID.
class KeywordMatchTypeEnum(object):
    """Container for an enum describing possible keyword match types."""

    class KeywordMatchType(enum.IntEnum):
        """Possible Keyword match types."""
        UNSPECIFIED = 0 # Not specified.
        UNKNOWN = 1     # Response-only: value unknown in this API version.
        EXACT = 2       # Exact match.
        PHRASE = 3      # Phrase match.
        BROAD = 4       # Broad match.
class KeywordPlanAdGroupErrorEnum(object):
    """Container for an enum describing keyword plan ad group errors."""

    class KeywordPlanAdGroupError(enum.IntEnum):
        """Enum describing possible errors from applying a keyword plan ad group."""
        UNSPECIFIED = 0    # Enum unspecified.
        UNKNOWN = 1        # Error code not known in this version.
        INVALID_NAME = 2   # Name missing, empty, too long or has invalid chars.
        DUPLICATE_NAME = 3 # Name duplicates an existing or in-request ad group name.
class KeywordPlanCampaignErrorEnum(object):
    """Container for an enum describing keyword plan campaign errors."""

    class KeywordPlanCampaignError(enum.IntEnum):
        """Enum describing possible errors from applying a keyword plan campaign."""
        UNSPECIFIED = 0       # Enum unspecified.
        UNKNOWN = 1           # Error code not known in this version.
        INVALID_NAME = 2      # Name missing, empty, too long or has invalid chars.
        INVALID_LANGUAGES = 3 # Contains one or more untargetable languages.
        INVALID_GEOS = 4      # Contains one or more invalid geo targets.
        DUPLICATE_NAME = 5    # Name duplicates an existing or in-request campaign name.
        MAX_GEOS_EXCEEDED = 6 # Number of geo targets exceeds limits.
class KeywordPlanCompetitionLevelEnum(object):
    """Container for an enum describing keyword competition levels."""

    class KeywordPlanCompetitionLevel(enum.IntEnum):
        """Competition level of a keyword."""
        UNSPECIFIED = 0 # Not specified.
        UNKNOWN = 1     # Value unknown in this version.
        LOW = 2         # Low competition.
        MEDIUM = 3      # Medium competition.
        HIGH = 4        # High competition.
class KeywordPlanErrorEnum(object):
    """Container for an enum describing possible keyword plan errors."""

    class KeywordPlanError(enum.IntEnum):
        """Enum describing possible errors from applying a keyword plan."""
        UNSPECIFIED = 0                              # Enum unspecified.
        UNKNOWN = 1                                  # Error code not known in this version.
        BID_MULTIPLIER_OUT_OF_RANGE = 2              # Bid multiplier outside valid range.
        BID_TOO_HIGH = 3                             # Bid value too high.
        BID_TOO_LOW = 4                              # Bid value too low.
        BID_TOO_MANY_FRACTIONAL_DIGITS = 5           # Cpc bid not a multiple of the minimum billable unit.
        DAILY_BUDGET_TOO_LOW = 6                     # Daily budget too low.
        DAILY_BUDGET_TOO_MANY_FRACTIONAL_DIGITS = 7  # Daily budget not a multiple of the minimum billable unit.
        INVALID_VALUE = 8                            # Input has an invalid value.
        KEYWORD_PLAN_HAS_NO_KEYWORDS = 9             # Plan has no keyword.
        KEYWORD_PLAN_NOT_ENABLED = 10                # Plan not enabled; no mutation/forecast/stats.
        KEYWORD_PLAN_NOT_FOUND = 11                  # Requested plan not found.
        # NOTE: value 12 is unused; the gap mirrors the proto definition.
        MISSING_BID = 13                             # Plan is missing a cpc bid.
        MISSING_FORECAST_PERIOD = 14                 # Missing required forecast_period field.
        INVALID_FORECAST_DATE_RANGE = 15             # forecast_period has invalid date range.
        INVALID_NAME = 16                            # Plan's name is invalid.
class KeywordPlanForecastIntervalEnum(object):
    """Container for an enum describing forecast intervals."""

    class KeywordPlanForecastInterval(enum.IntEnum):
        """Forecast intervals."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Value unknown in this version.
        # NOTE: value 2 is unused; the gap mirrors the proto definition.
        NEXT_WEEK = 3    # Next week, per the account's default locale
                         # (mostly SUN-SAT or MON-SUN); may differ from
                         # next-7 days.
        NEXT_MONTH = 4   # Next month date range for keyword plan.
        NEXT_QUARTER = 5 # Next quarter date range for keyword plan.
class KeywordPlanIdeaErrorEnum(object):
    """Container for an enum describing KeywordPlanIdeaService errors."""

    class KeywordPlanIdeaError(enum.IntEnum):
        """Enum describing possible errors from KeywordPlanIdeaService."""
        UNSPECIFIED = 0     # Enum unspecified.
        UNKNOWN = 1         # Error code not known in this version.
        URL_CRAWL_ERROR = 2 # Error when crawling the input URL.
        INVALID_VALUE = 3   # Input has an invalid value.
class KeywordPlanKeywordErrorEnum(object):
    """Container for an enum describing keyword plan keyword errors."""

    class KeywordPlanKeywordError(enum.IntEnum):
        """Enum describing possible errors from applying a keyword plan keyword."""
        UNSPECIFIED = 0                # Enum unspecified.
        UNKNOWN = 1                    # Error code not known in this version.
        INVALID_KEYWORD_MATCH_TYPE = 2 # Keyword or negative keyword has invalid match type.
        DUPLICATE_KEYWORD = 3          # Same text and match type already exists.
        KEYWORD_TEXT_TOO_LONG = 4      # Text exceeds the allowed limit.
        KEYWORD_HAS_INVALID_CHARS = 5  # Text has invalid characters or symbols.
        KEYWORD_HAS_TOO_MANY_WORDS = 6 # Text has too many words.
        INVALID_KEYWORD_TEXT = 7       # Invalid text.
class KeywordPlanNegativeKeywordErrorEnum(object):
    """Container for an enum describing keyword plan negative keyword errors."""

    class KeywordPlanNegativeKeywordError(enum.IntEnum):
        """Enum describing possible errors from applying a keyword plan
        negative keyword."""
        UNSPECIFIED = 0 # Enum unspecified.
        UNKNOWN = 1     # Error code not known in this version.
class KeywordPlanNetworkEnum(object):
    """Container for an enum of keyword plan forecastable network types."""

    class KeywordPlanNetwork(enum.IntEnum):
        """Enumerates keyword plan forecastable network types."""
        UNSPECIFIED = 0                # Not specified.
        UNKNOWN = 1                    # Value unknown in this version.
        GOOGLE_SEARCH = 2              # Google Search.
        GOOGLE_SEARCH_AND_PARTNERS = 3 # Google Search + Search partners.
class LabelErrorEnum(object):
    """Container for an enum describing possible label errors."""

    class LabelError(enum.IntEnum):
        """Enum describing possible label errors."""
        UNSPECIFIED = 0                                       # Enum unspecified.
        UNKNOWN = 1                                           # Error code not known in this version.
        CANNOT_APPLY_INACTIVE_LABEL = 2                       # An inactive label cannot be applied.
        CANNOT_APPLY_LABEL_TO_DISABLED_AD_GROUP_CRITERION = 3 # Cannot label a disabled ad group criterion.
        CANNOT_APPLY_LABEL_TO_NEGATIVE_AD_GROUP_CRITERION = 4 # Cannot label a negative ad group criterion.
        EXCEEDED_LABEL_LIMIT_PER_TYPE = 5                     # At most 50 labels per resource.
        INVALID_RESOURCE_FOR_MANAGER_LABEL = 6                # Manager-account labels cannot be applied to
                                                              # campaign/ad group/ad group ad/criterion resources.
        DUPLICATE_NAME = 7                                    # Label names must be unique.
        INVALID_LABEL_NAME = 8                                # Label names cannot be empty.
        CANNOT_ATTACH_LABEL_TO_DRAFT = 9                      # Labels cannot be applied to a draft.
        CANNOT_ATTACH_NON_MANAGER_LABEL_TO_CUSTOMER = 10      # Non-manager labels cannot be applied to the
                                                              # customer resource.
class LabelStatusEnum(object):
    """Container for an enum describing possible label statuses."""

    class LabelStatus(enum.IntEnum):
        """Possible statuses of a label."""
        UNSPECIFIED = 0 # Not specified.
        UNKNOWN = 1     # Response-only: value unknown in this API version.
        ENABLED = 2     # Label is enabled.
        REMOVED = 3     # Label is removed.
class LanguageCodeErrorEnum(object):
    """Container for an enum describing language code errors."""

    class LanguageCodeError(enum.IntEnum):
        """Enum describing language code errors."""
        UNSPECIFIED = 0             # Enum unspecified.
        UNKNOWN = 1                 # Error code not known in this version.
        LANGUAGE_CODE_NOT_FOUND = 2 # Input language code not recognized.
        INVALID_LANGUAGE_CODE = 3   # The language is not allowed to use.
class LegacyAppInstallAdAppStoreEnum(object):
    """Container for the LegacyAppInstallAdAppStore enum."""

    class LegacyAppInstallAdAppStore(enum.IntEnum):
        """App store type in a legacy app install ad.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            APPLE_APP_STORE: Apple iTunes.
            GOOGLE_PLAY: Google Play.
            WINDOWS_STORE: Windows Store.
            WINDOWS_PHONE_STORE: Windows Phone Store.
            CN_APP_STORE: App hosted in a Chinese app store.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        APPLE_APP_STORE = 2
        GOOGLE_PLAY = 3
        WINDOWS_STORE = 4
        WINDOWS_PHONE_STORE = 5
        CN_APP_STORE = 6
class ListOperationErrorEnum(object):
    """Container for the ListOperationError enum."""

    class ListOperationError(enum.IntEnum):
        """Possible list operation errors.

        Values:
            UNSPECIFIED: Enum unspecified.
            UNKNOWN: Received error code unknown in this version.
            REQUIRED_FIELD_MISSING: Required field in value is missing.
            DUPLICATE_VALUES: Duplicate/identical value sent in multiple
                list operations.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        # Values 7 and 8 mirror the proto field numbers; the gap is intentional.
        REQUIRED_FIELD_MISSING = 7
        DUPLICATE_VALUES = 8
class ListingCustomAttributeIndexEnum(object):
    """Container for the ListingCustomAttributeIndex enum."""

    class ListingCustomAttributeIndex(enum.IntEnum):
        """Index of a listing custom attribute (first through fifth).

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            INDEX0..INDEX4: First through fifth listing custom attribute.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        # Values 7-11 mirror the proto field numbers; the gap is intentional.
        INDEX0 = 7
        INDEX1 = 8
        INDEX2 = 9
        INDEX3 = 10
        INDEX4 = 11
class ListingGroupTypeEnum(object):
    """Container for the ListingGroupType enum."""

    class ListingGroupType(enum.IntEnum):
        """Type of a listing group node.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            SUBDIVISION: Subdivision of products along a listing dimension;
                defines tree structure only and is not used for targeting.
            UNIT: Listing group unit that defines a bid.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        SUBDIVISION = 2
        UNIT = 3
class LocalPlaceholderFieldEnum(object):
    """Container for the LocalPlaceholderField enum."""

    class LocalPlaceholderField(enum.IntEnum):
        """Possible values for Local placeholder fields.

        Values 2-20 identify the data fields of a local-deals feed used by
        dynamic ads (IDs, text, prices, URLs, and app links).
        """
        UNSPECIFIED = 0          # Not specified.
        UNKNOWN = 1              # Return-only; value unknown in this version.
        DEAL_ID = 2              # STRING. Required. Unique ID.
        DEAL_NAME = 3            # STRING. Required. Main headline.
        SUBTITLE = 4             # STRING. Local deal subtitle.
        DESCRIPTION = 5          # STRING. Deal description.
        PRICE = 6                # STRING. e.g. "100.00 USD".
        FORMATTED_PRICE = 7      # STRING. e.g. "Starting at $100.00 USD".
        SALE_PRICE = 8           # STRING. e.g. "80.00 USD".
        FORMATTED_SALE_PRICE = 9 # STRING. e.g. "On sale for $80.00".
        IMAGE_URL = 10           # URL. Image displayed in the ad.
        ADDRESS = 11             # STRING. Complete property address.
        CATEGORY = 12            # STRING. Category for recommendation engine.
        CONTEXTUAL_KEYWORDS = 13 # STRING_LIST. Keywords for retrieval.
        FINAL_URLS = 14          # URL_LIST. Required with Upgraded URLs.
        FINAL_MOBILE_URLS = 15   # URL_LIST. Final mobile URLs.
        TRACKING_URL = 16        # URL. Tracking template (Upgraded URLs).
        ANDROID_APP_LINK = 17    # STRING. android-app://{package_id}/{scheme}/{host_path}.
        SIMILAR_DEAL_IDS = 18    # STRING_LIST. Recommended related deal IDs.
        IOS_APP_LINK = 19        # STRING. iOS app link.
        IOS_APP_STORE_ID = 20    # INT64. iOS app store ID.
class LocationExtensionTargetingCriterionFieldEnum(object):
    """Container for the LocationExtensionTargetingCriterionField enum."""

    class LocationExtensionTargetingCriterionField(enum.IntEnum):
        """Possible values for Location Extension Targeting criterion fields.

        All data fields are STRING components of the business address.
        """
        UNSPECIFIED = 0     # Not specified.
        UNKNOWN = 1         # Return-only; value unknown in this version.
        ADDRESS_LINE_1 = 2  # Line 1 of the business address.
        ADDRESS_LINE_2 = 3  # Line 2 of the business address.
        CITY = 4            # City of the business address.
        PROVINCE = 5        # Province of the business address.
        POSTAL_CODE = 6     # Postal code of the business address.
        COUNTRY_CODE = 7    # Country code of the business address.
class LocationPlaceholderFieldEnum(object):
    """Container for the LocationPlaceholderField enum."""

    class LocationPlaceholderField(enum.IntEnum):
        """Possible values for Location placeholder fields.

        All data fields are STRING components describing the business.
        """
        UNSPECIFIED = 0     # Not specified.
        UNKNOWN = 1         # Return-only; value unknown in this version.
        BUSINESS_NAME = 2   # Name of the business.
        ADDRESS_LINE_1 = 3  # Line 1 of the business address.
        ADDRESS_LINE_2 = 4  # Line 2 of the business address.
        CITY = 5            # City of the business address.
        PROVINCE = 6        # Province of the business address.
        POSTAL_CODE = 7     # Postal code of the business address.
        COUNTRY_CODE = 8    # Country code of the business address.
        PHONE_NUMBER = 9    # Phone number of the business.
class ManagerLinkStatusEnum(object):
    """Container for the ManagerLinkStatus enum."""

    class ManagerLinkStatus(enum.IntEnum):
        """Possible statuses of a manager-client link.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            ACTIVE: Current in-effect relationship.
            INACTIVE: Terminated relationship.
            PENDING: Requested by manager; client has not accepted yet.
            REFUSED: Requested by manager; client refused.
            CANCELED: Requested by manager, then canceled by the manager.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        ACTIVE = 2
        INACTIVE = 3
        PENDING = 4
        REFUSED = 5
        CANCELED = 6
class MatchingFunctionContextTypeEnum(object):
    """Container for the MatchingFunctionContextType enum."""

    class MatchingFunctionContextType(enum.IntEnum):
        """Possible context types for an operand in a matching function.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            FEED_ITEM_ID: Feed item id in the request context.
            DEVICE_NAME: Device in use ('Desktop' or 'Mobile').
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        FEED_ITEM_ID = 2
        DEVICE_NAME = 3
class MatchingFunctionOperatorEnum(object):
    """Container for the MatchingFunctionOperator enum."""

    class MatchingFunctionOperator(enum.IntEnum):
        """Possible operators in a matching function.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            IN: The IN operator.
            IDENTITY: The IDENTITY operator.
            EQUALS: The EQUALS operator.
            AND: Takes two or more FunctionOperand operands and checks all
                evaluate to true; for ad-format functions all operands must
                be in left_operands.
            CONTAINS_ANY: True when left_operands contain any element of
                right_operands (which must hold 1-3 ConstantOperands).
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        IN = 2
        IDENTITY = 3
        EQUALS = 4
        AND = 5
        CONTAINS_ANY = 6
class MediaBundleErrorEnum(object):
    """Container for the MediaBundleError enum."""

    class MediaBundleError(enum.IntEnum):
        """Possible media bundle (HTML5 ZIP) errors.

        Values 3-24 mirror the proto field numbers (value 2 is unused).
        """
        UNSPECIFIED = 0                                  # Enum unspecified.
        UNKNOWN = 1                                      # Error code unknown in this version.
        BAD_REQUEST = 3                                  # Problem with the request.
        DOUBLECLICK_BUNDLE_NOT_ALLOWED = 4               # DoubleClick Studio ZIPs unsupported.
        EXTERNAL_URL_NOT_ALLOWED = 5                     # Cannot reference URL outside bundle.
        FILE_TOO_LARGE = 6                               # Bundle file too large.
        GOOGLE_WEB_DESIGNER_ZIP_FILE_NOT_PUBLISHED = 7   # GWD ZIP not published.
        INVALID_INPUT = 8                                # Invalid input.
        INVALID_MEDIA_BUNDLE = 9                         # Problem with the bundle.
        INVALID_MEDIA_BUNDLE_ENTRY = 10                  # Problem with bundle entries.
        INVALID_MIME_TYPE = 11                           # File with unknown mime type.
        INVALID_PATH = 12                                # Invalid asset path in bundle.
        INVALID_URL_REFERENCE = 13                       # Reference to asset not in ZIP.
        MEDIA_DATA_TOO_LARGE = 14                        # Media data too large.
        MISSING_PRIMARY_MEDIA_BUNDLE_ENTRY = 15          # No primary entry in bundle.
        SERVER_ERROR = 16                                # Server-side error.
        STORAGE_ERROR = 17                               # Image could not be stored.
        SWIFFY_BUNDLE_NOT_ALLOWED = 18                   # Swiffy bundles not allowed.
        TOO_MANY_FILES = 19                              # Too many files in bundle.
        UNEXPECTED_SIZE = 20                             # Illegal dimensions.
        UNSUPPORTED_GOOGLE_WEB_DESIGNER_ENVIRONMENT = 21 # GWD not built for "Google Ads".
        UNSUPPORTED_HTML5_FEATURE = 22                   # Unsupported HTML5 feature.
        URL_IN_MEDIA_BUNDLE_NOT_SSL_COMPLIANT = 23       # URL in entry not SSL compliant.
        CUSTOM_EXIT_NOT_ALLOWED = 24                     # Custom exits not allowed.
class MediaFileErrorEnum(object):
    """Container for the MediaFileError enum."""

    class MediaFileError(enum.IntEnum):
        """Possible media file errors."""
        UNSPECIFIED = 0                                   # Enum unspecified.
        UNKNOWN = 1                                       # Error code unknown in this version.
        CANNOT_CREATE_STANDARD_ICON = 2                   # Cannot create a standard icon type.
        CANNOT_SELECT_STANDARD_ICON_WITH_OTHER_TYPES = 3  # Standard icons may only be selected alone.
        CANNOT_SPECIFY_MEDIA_FILE_ID_AND_DATA = 4         # Image has both media file ID and data.
        DUPLICATE_MEDIA = 5                               # Same type + reference ID already exists.
        EMPTY_FIELD = 6                                   # Required field missing or empty string.
        RESOURCE_REFERENCED_IN_MULTIPLE_OPS = 7           # Media file modified more than once per call.
        FIELD_NOT_SUPPORTED_FOR_MEDIA_SUB_TYPE = 8        # Field unsupported for the media sub type.
        INVALID_MEDIA_FILE_ID = 9                         # Invalid media file ID.
        INVALID_MEDIA_SUB_TYPE = 10                       # Invalid media subtype.
        INVALID_MEDIA_FILE_TYPE = 11                      # Invalid media file type.
        INVALID_MIME_TYPE = 12                            # Invalid mimetype.
        INVALID_REFERENCE_ID = 13                         # Invalid media reference ID.
        INVALID_YOU_TUBE_ID = 14                          # Invalid YouTube video ID.
        MEDIA_FILE_FAILED_TRANSCODING = 15                # Transcoding failed.
        MEDIA_NOT_TRANSCODED = 16                         # Not yet transcoded.
        MEDIA_TYPE_DOES_NOT_MATCH_MEDIA_FILE_TYPE = 17    # Declared type != actual file type.
        NO_FIELDS_SPECIFIED = 18                          # No fields specified.
        NULL_REFERENCE_ID_AND_MEDIA_ID = 19               # Need reference ID or media file ID.
        TOO_LONG = 20                                     # String has too many characters.
        UNSUPPORTED_TYPE = 21                             # Specified type unsupported.
        YOU_TUBE_SERVICE_UNAVAILABLE = 22                 # YouTube unavailable for video data.
        YOU_TUBE_VIDEO_HAS_NON_POSITIVE_DURATION = 23     # Video has non-positive duration.
        YOU_TUBE_VIDEO_NOT_FOUND = 24                     # Valid-looking ID, but video not found.
class MediaTypeEnum(object):
    """Container for the MediaType enum."""

    class MediaType(enum.IntEnum):
        """The type of media.

        Values:
            UNSPECIFIED: Media type not specified.
            UNKNOWN: Response-only; value unknown in this version.
            IMAGE: Static image, used for image ad.
            ICON: Small image, used for map ad.
            MEDIA_BUNDLE: ZIP file, used in fields of template ads.
            AUDIO: Audio file.
            VIDEO: Video file.
            DYNAMIC_IMAGE: Animated image, such as animated GIF.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        IMAGE = 2
        ICON = 3
        MEDIA_BUNDLE = 4
        AUDIO = 5
        VIDEO = 6
        DYNAMIC_IMAGE = 7
class MerchantCenterLinkStatusEnum(object):
    """Container for the MerchantCenterLinkStatus enum."""

    class MerchantCenterLinkStatus(enum.IntEnum):
        """Statuses for a link between a Google Ads customer and a Google
        Merchant Center account.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            ENABLED: The link is enabled.
            PENDING: No effect yet; proposed by the Merchant Center account
                owner and not yet confirmed by the customer.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENABLED = 2
        PENDING = 3
class MessagePlaceholderFieldEnum(object):
    """Container for the MessagePlaceholderField enum."""

    class MessagePlaceholderField(enum.IntEnum):
        """Possible values for Message placeholder fields.

        All data fields are STRING.
        """
        UNSPECIFIED = 0             # Not specified.
        UNKNOWN = 1                 # Return-only; value unknown in this version.
        BUSINESS_NAME = 2           # The name of your business.
        COUNTRY_CODE = 3            # Country code of phone number.
        PHONE_NUMBER = 4            # Number capable of sending/receiving texts.
        MESSAGE_EXTENSION_TEXT = 5  # Text shown in the click-to-message ad.
        MESSAGE_TEXT = 6            # Text pre-filled in the user's messaging app.
class MimeTypeEnum(object):
    """Container for the MimeType enum."""

    class MimeType(enum.IntEnum):
        """The mime type of a media file."""
        UNSPECIFIED = 0    # Mime type not specified.
        UNKNOWN = 1        # Response-only; value unknown in this version.
        IMAGE_JPEG = 2     # image/jpeg
        IMAGE_GIF = 3      # image/gif
        IMAGE_PNG = 4      # image/png
        FLASH = 5          # application/x-shockwave-flash
        TEXT_HTML = 6      # text/html
        PDF = 7            # application/pdf
        MSWORD = 8         # application/msword
        MSEXCEL = 9        # application/vnd.ms-excel
        RTF = 10           # application/rtf
        AUDIO_WAV = 11     # audio/wav
        AUDIO_MP3 = 12     # audio/mp3
        HTML5_AD_ZIP = 13  # application/x-html5-ad-zip
class MinuteOfHourEnum(object):
    """Container for the MinuteOfHour enum."""

    class MinuteOfHour(enum.IntEnum):
        """Quarter-hour marks within an hour, e.g. "FIFTEEN".

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Value unknown in this version.
            ZERO: Zero minutes past the hour.
            FIFTEEN: Fifteen minutes past the hour.
            THIRTY: Thirty minutes past the hour.
            FORTY_FIVE: Forty-five minutes past the hour.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        ZERO = 2
        FIFTEEN = 3
        THIRTY = 4
        FORTY_FIVE = 5
class MobileDeviceTypeEnum(object):
    """Container for the MobileDeviceType enum."""

    class MobileDeviceType(enum.IntEnum):
        """The type of mobile device.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            MOBILE: Mobile phones.
            TABLET: Tablets.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        MOBILE = 2
        TABLET = 3
class MonthOfYearEnum(object):
    """Container for the MonthOfYear enum."""

    class MonthOfYear(enum.IntEnum):
        """Months of the year, e.g. "January".

        Calendar months are offset by one from their usual numbering
        because 0 and 1 are reserved for UNSPECIFIED/UNKNOWN.
        """
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Value unknown in this version.
        JANUARY = 2
        FEBRUARY = 3
        MARCH = 4
        APRIL = 5
        MAY = 6
        JUNE = 7
        JULY = 8
        AUGUST = 9
        SEPTEMBER = 10
        OCTOBER = 11
        NOVEMBER = 12
        DECEMBER = 13
class MultiplierErrorEnum(object):
    """Container for the MultiplierError enum."""

    class MultiplierError(enum.IntEnum):
        """Possible bid multiplier errors."""
        UNSPECIFIED = 0                                       # Enum unspecified.
        UNKNOWN = 1                                           # Error code unknown in this version.
        MULTIPLIER_TOO_HIGH = 2                               # Multiplier value too high.
        MULTIPLIER_TOO_LOW = 3                                # Multiplier value too low.
        TOO_MANY_FRACTIONAL_DIGITS = 4                        # Too many fractional digits.
        MULTIPLIER_NOT_ALLOWED_FOR_BIDDING_STRATEGY = 5       # Not settable for this strategy.
        MULTIPLIER_NOT_ALLOWED_WHEN_BASE_BID_IS_MISSING = 6   # No base bid (e.g. content max cpc).
        NO_MULTIPLIER_SPECIFIED = 7                           # A bid multiplier must be specified.
        MULTIPLIER_CAUSES_BID_TO_EXCEED_DAILY_BUDGET = 8      # Bid would exceed daily budget.
        MULTIPLIER_CAUSES_BID_TO_EXCEED_MONTHLY_BUDGET = 9    # Bid would exceed monthly budget.
        MULTIPLIER_CAUSES_BID_TO_EXCEED_CUSTOM_BUDGET = 10    # Bid would exceed custom budget.
        MULTIPLIER_CAUSES_BID_TO_EXCEED_MAX_ALLOWED_BID = 11  # Bid would exceed max allowed bid.
        BID_LESS_THAN_MIN_ALLOWED_BID_WITH_MULTIPLIER = 12    # Bid would fall below minimum.
        MULTIPLIER_AND_BIDDING_STRATEGY_TYPE_MISMATCH = 13    # cpc/cpm type must match strategy.
class MutateErrorEnum(object):
    """Container for the MutateError enum."""

    class MutateError(enum.IntEnum):
        """Possible mutate errors.

        Non-contiguous values mirror the proto field numbers.
        """
        UNSPECIFIED = 0                      # Enum unspecified.
        UNKNOWN = 1                          # Error code unknown in this version.
        RESOURCE_NOT_FOUND = 3               # Requested resource not found.
        ID_EXISTS_IN_MULTIPLE_MUTATES = 7    # Same resource mutated twice in one request.
        INCONSISTENT_FIELD_VALUES = 8        # Field conflicts with another field for same data.
        MUTATE_NOT_ALLOWED = 9               # Mutates not allowed for this resource.
        RESOURCE_NOT_IN_GOOGLE_ADS = 10      # Resource belongs to another ads system.
        RESOURCE_ALREADY_EXISTS = 11         # Resource being created already exists.
class MutateJobErrorEnum(object):
    """Container for the MutateJobError enum."""

    class MutateJobError(enum.IntEnum):
        """Possible mutate job request errors.

        Values:
            UNSPECIFIED: Enum unspecified.
            UNKNOWN: Received error code unknown in this version.
            CANNOT_MODIFY_JOB_AFTER_JOB_STARTS_RUNNING: Job cannot add
                operations or run once it has started running.
            EMPTY_OPERATIONS: AddMutateJobOperations request had no operations.
            INVALID_SEQUENCE_TOKEN: Invalid sequence token in request.
            RESULTS_NOT_READY: Results only retrievable after job finishes.
            INVALID_PAGE_SIZE: Invalid page size for ListMutateJobResults.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CANNOT_MODIFY_JOB_AFTER_JOB_STARTS_RUNNING = 2
        EMPTY_OPERATIONS = 3
        INVALID_SEQUENCE_TOKEN = 4
        RESULTS_NOT_READY = 5
        INVALID_PAGE_SIZE = 6
class MutateJobStatusEnum(object):
    """Container for the MutateJobStatus enum."""

    class MutateJobStatus(enum.IntEnum):
        """The mutate job statuses.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            PENDING: Job not currently running.
            RUNNING: Job is running.
            DONE: Job is done.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        PENDING = 2
        RUNNING = 3
        DONE = 4
class NegativeGeoTargetTypeEnum(object):
    """Container for the NegativeGeoTargetType enum."""

    class NegativeGeoTargetType(enum.IntEnum):
        """The possible negative geo target types.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Value unknown in this version.
            DONT_CARE: User is excluded if either their Area of Interest
                (AOI) or Location of Presence (LOP) matches the geo target.
            LOCATION_OF_PRESENCE: User is excluded only if their Location
                of Presence (LOP) matches the geo target.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        DONT_CARE = 2
        LOCATION_OF_PRESENCE = 3
class NewResourceCreationErrorEnum(object):
    """Container for the NewResourceCreationError enum."""

    class NewResourceCreationError(enum.IntEnum):
        """Possible new resource creation errors.

        Values:
            UNSPECIFIED: Enum unspecified.
            UNKNOWN: Received error code unknown in this version.
            CANNOT_SET_ID_FOR_CREATE: Do not set the id field when creating.
            DUPLICATE_TEMP_IDS: Multiple new resources share one temp ID.
            TEMP_ID_RESOURCE_HAD_ERRORS: Parent with this temp ID failed
                validation, so the child is not validated.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CANNOT_SET_ID_FOR_CREATE = 2
        DUPLICATE_TEMP_IDS = 3
        TEMP_ID_RESOURCE_HAD_ERRORS = 4
class NotEmptyErrorEnum(object):
    """Container for the NotEmptyError enum."""

    class NotEmptyError(enum.IntEnum):
        """Possible not-empty errors.

        Values:
            UNSPECIFIED: Enum unspecified.
            UNKNOWN: Received error code unknown in this version.
            EMPTY_LIST: Empty list.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        EMPTY_LIST = 2
class NullErrorEnum(object):
    """Container for the NullError enum."""

    class NullError(enum.IntEnum):
        """Possible null errors.

        Values:
            UNSPECIFIED: Enum unspecified.
            UNKNOWN: Received error code unknown in this version.
            NULL_CONTENT: List/container must not contain null elements.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        NULL_CONTENT = 2
class OperatingSystemVersionOperatorTypeEnum(object):
    """Container for the OperatingSystemVersionOperatorType enum."""

    class OperatingSystemVersionOperatorType(enum.IntEnum):
        """The type of operating system version match.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            EQUALS_TO: Equals the specified version.
            GREATER_THAN_EQUALS_TO: Greater than or equal to the specified
                version (proto value 4; 3 is unused).
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        EQUALS_TO = 2
        GREATER_THAN_EQUALS_TO = 4
class OperationAccessDeniedErrorEnum(object):
    """Container for the OperationAccessDeniedError enum."""

    class OperationAccessDeniedError(enum.IntEnum):
        """Possible operation access denied errors."""
        UNSPECIFIED = 0                                   # Enum unspecified.
        UNKNOWN = 1                                       # Error code unknown in this version.
        ACTION_NOT_PERMITTED = 2                          # Unauthorized method invocation (get, mutate, etc.).
        CREATE_OPERATION_NOT_PERMITTED = 3                # Unauthorized CREATE in a mutate call.
        REMOVE_OPERATION_NOT_PERMITTED = 4                # Unauthorized REMOVE in a mutate call.
        UPDATE_OPERATION_NOT_PERMITTED = 5                # Unauthorized UPDATE in a mutate call.
        MUTATE_ACTION_NOT_PERMITTED_FOR_CLIENT = 6        # Mutate not allowed on this campaign from this client.
        OPERATION_NOT_PERMITTED_FOR_CAMPAIGN_TYPE = 7     # Not permitted for this campaign type.
        CREATE_AS_REMOVED_NOT_PERMITTED = 8               # CREATE may not set status to REMOVED.
        OPERATION_NOT_PERMITTED_FOR_REMOVED_RESOURCE = 9  # Campaign or ad group is removed.
        OPERATION_NOT_PERMITTED_FOR_AD_GROUP_TYPE = 10    # Not permitted for this ad group type.
        MUTATE_NOT_PERMITTED_FOR_CUSTOMER = 11            # Mutate not allowed for this customer.
class OperatorErrorEnum(object):
    """Container for the OperatorError enum."""

    class OperatorError(enum.IntEnum):
        """Possible operator errors.

        Values:
            UNSPECIFIED: Enum unspecified.
            UNKNOWN: Received error code unknown in this version.
            OPERATOR_NOT_SUPPORTED: Operator not supported.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        OPERATOR_NOT_SUPPORTED = 2
class PageOnePromotedStrategyGoalEnum(object):
    """Container for the PageOnePromotedStrategyGoal enum."""

    class PageOnePromotedStrategyGoal(enum.IntEnum):
        """Possible strategy goals for page-one promotion.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            FIRST_PAGE: First page on google.com.
            FIRST_PAGE_PROMOTED: Top slots of the first page on google.com.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        FIRST_PAGE = 2
        FIRST_PAGE_PROMOTED = 3
class ParentalStatusTypeEnum(object):
    """Container for the ParentalStatusType enum."""

    class ParentalStatusType(enum.IntEnum):
        """The type of parental status (e.g. not a parent).

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            PARENT: Parent.
            NOT_A_PARENT: Not a parent.
            UNDETERMINED: Undetermined parental status.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        # Criterion-type values start at 300, mirroring the proto definition.
        PARENT = 300
        NOT_A_PARENT = 301
        UNDETERMINED = 302
class PartialFailureErrorEnum(object):
    """Container for the PartialFailureError enum."""

    class PartialFailureError(enum.IntEnum):
        """Possible partial failure errors.

        Values:
            UNSPECIFIED: Enum unspecified.
            UNKNOWN: Received error code unknown in this version.
            PARTIAL_FAILURE_MODE_REQUIRED: The request's partial failure
                field was false, but this method requires it to be true.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        PARTIAL_FAILURE_MODE_REQUIRED = 2
class PaymentModeEnum(object):
    """Container for the PaymentMode enum."""

    class PaymentMode(enum.IntEnum):
        """Possible payment modes.

        Values:
            UNSPECIFIED: Not specified.
            UNKNOWN: Return-only; value unknown in this version.
            CLICKS: Pay per click (proto value 4).
            CONVERSION_VALUE: Pay per conversion value (proto value 5).
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CLICKS = 4
        CONVERSION_VALUE = 5
class PlaceholderTypeEnum(object):
    """Container for the PlaceholderType enum."""

    class PlaceholderType(enum.IntEnum):
        """Possible placeholder types for a feed mapping.

        Values 2-12 are ad extensions; 13-20 mark feeds for specific
        dynamic remarketing verticals.
        """
        UNSPECIFIED = 0          # Not specified.
        UNKNOWN = 1              # Return-only; value unknown in this version.
        SITELINK = 2             # Links in the ad to pages of your website.
        CALL = 3                 # Phone number customers can call from the ad.
        APP = 4                  # Link to a mobile app alongside the website.
        LOCATION = 5             # Google My Business locations in the ad.
        AFFILIATE_LOCATION = 6   # Nearby retail-chain stores carrying your products.
        CALLOUT = 7              # Extra descriptive text above/below search results.
        STRUCTURED_SNIPPET = 8   # Predefined category info; needs >= 3 SNIPPETS values.
        MESSAGE = 9              # Tap-to-text contact from the ad.
        PRICE = 10               # Price table of three to eight rows.
        PROMOTION = 11           # Highlight sales and promotions.
        AD_CUSTOMIZER = 12       # Inject custom data into title/description.
        DYNAMIC_EDUCATION = 13   # Education dynamic remarketing feed.
        DYNAMIC_FLIGHT = 14      # Flight dynamic remarketing feed.
        DYNAMIC_CUSTOM = 15      # Custom dynamic remarketing feed (fallback type).
        DYNAMIC_HOTEL = 16       # Hotels and rentals dynamic remarketing feed.
        DYNAMIC_REAL_ESTATE = 17 # Real estate dynamic remarketing feed.
        DYNAMIC_TRAVEL = 18      # Travel dynamic remarketing feed.
        DYNAMIC_LOCAL = 19       # Local deals dynamic remarketing feed.
        DYNAMIC_JOB = 20         # Job dynamic remarketing feed.
class PlacementTypeEnum(object):
    """Container for the PlacementType enum."""

    class PlacementType(enum.IntEnum):
        """Possible placement types for a feed mapping.

        Values are wire-format constants; do not renumber.
        """
        UNSPECIFIED = 0          # Not specified.
        UNKNOWN = 1              # Response-only: value unknown in this version.
        WEBSITE = 2              # Websites (e.g. 'www.flowers4sale.com').
        MOBILE_APP_CATEGORY = 3  # Mobile application categories (e.g. 'Games').
        MOBILE_APPLICATION = 4   # Mobile applications.
        YOUTUBE_VIDEO = 5        # YouTube videos.
        YOUTUBE_CHANNEL = 6      # YouTube channels.
class PolicyApprovalStatusEnum(object):
    """Container for the PolicyApprovalStatus enum."""

    class PolicyApprovalStatus(enum.IntEnum):
        """The possible policy approval statuses.

        When several approval statuses are available the most severe one is
        used; the severity order is DISAPPROVED, AREA_OF_INTEREST_ONLY,
        APPROVED_LIMITED, APPROVED. Values are wire-format constants.
        """
        UNSPECIFIED = 0            # No value has been specified.
        UNKNOWN = 1                # Response-only: value unknown in this version.
        DISAPPROVED = 2            # Will not serve.
        APPROVED_LIMITED = 3       # Serves with restrictions.
        APPROVED = 4               # Serves without restrictions.
        AREA_OF_INTEREST_ONLY = 5  # Will not serve in targeted countries, but
                                   # may serve for users searching for
                                   # information about those countries.
class PolicyFindingErrorEnum(object):
    """Container for the PolicyFindingError enum."""

    class PolicyFindingError(enum.IntEnum):
        """Possible policy finding errors. Values are wire-format constants."""
        UNSPECIFIED = 0             # Enum unspecified.
        UNKNOWN = 1                 # Error code unknown in this version.
        POLICY_FINDING = 2          # Resource disapproved: the policy summary
                                    # includes policy topics of type PROHIBITED.
        POLICY_TOPIC_NOT_FOUND = 3  # The given policy topic does not exist.
class PolicyReviewStatusEnum(object):
    """Container for the PolicyReviewStatus enum."""

    class PolicyReviewStatus(enum.IntEnum):
        """The possible policy review statuses. Values are wire-format constants."""
        UNSPECIFIED = 0         # No value has been specified.
        UNKNOWN = 1             # Response-only: value unknown in this version.
        REVIEW_IN_PROGRESS = 2  # Currently under review.
        REVIEWED = 3            # Primary review complete; others may continue.
        UNDER_APPEAL = 4        # Resubmitted for approval, or the policy
                                # decision has been appealed.
class PolicyTopicEntryTypeEnum(object):
    """Container for the PolicyTopicEntryType enum."""

    class PolicyTopicEntryType(enum.IntEnum):
        """The possible policy topic entry types.

        Wire-format constants; note value 3 is intentionally unused — do not
        renumber to close the gap.
        """
        UNSPECIFIED = 0            # No value has been specified.
        UNKNOWN = 1                # Response-only: value unknown in this version.
        PROHIBITED = 2             # The resource will not be served.
        LIMITED = 4                # Not served under some circumstances.
        DESCRIPTIVE = 5            # Informational; does not limit serving.
        BROADENING = 6             # Could increase coverage beyond normal.
        AREA_OF_INTEREST_ONLY = 7  # Constrained in targeted countries, but may
                                   # serve elsewhere via area of interest.
class PolicyTopicEvidenceDestinationMismatchUrlTypeEnum(object):
    """Container for the PolicyTopicEvidenceDestinationMismatchUrlType enum."""

    class PolicyTopicEvidenceDestinationMismatchUrlType(enum.IntEnum):
        """Possible destination-mismatch URL types in policy topic evidence.

        Values are wire-format constants.
        """
        UNSPECIFIED = 0          # No value has been specified.
        UNKNOWN = 1              # Response-only: value unknown in this version.
        DISPLAY_URL = 2          # The display url.
        FINAL_URL = 3            # The final url.
        FINAL_MOBILE_URL = 4     # The final mobile url.
        TRACKING_URL = 5         # Tracking url template, substituted desktop url.
        MOBILE_TRACKING_URL = 6  # Tracking url template, substituted mobile url.
class PolicyValidationParameterErrorEnum(object):
    """Container for the PolicyValidationParameterError enum."""

    class PolicyValidationParameterError(enum.IntEnum):
        """Possible policy validation parameter errors.

        Values are wire-format constants.
        """
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code unknown in this version.
        # Ignorable policy topics are not supported for the ad type.
        UNSUPPORTED_AD_TYPE_FOR_IGNORABLE_POLICY_TOPICS = 2
        # Exempt policy violation keys are not supported for the ad type.
        UNSUPPORTED_AD_TYPE_FOR_EXEMPT_POLICY_VIOLATION_KEYS = 3
        # Cannot set both in the same policy violation parameter.
        CANNOT_SET_BOTH_IGNORABLE_POLICY_TOPICS_AND_EXEMPT_POLICY_VIOLATION_KEYS = 4
class PolicyViolationErrorEnum(object):
    """Container for the PolicyViolationError enum."""

    class PolicyViolationError(enum.IntEnum):
        """Possible policy violation errors. Values are wire-format constants."""
        UNSPECIFIED = 0   # Enum unspecified.
        UNKNOWN = 1       # Error code unknown in this version.
        POLICY_ERROR = 2  # A policy was violated; see PolicyViolationDetails.
class PositiveGeoTargetTypeEnum(object):
    """Container for the PositiveGeoTargetType enum."""

    class PositiveGeoTargetType(enum.IntEnum):
        """The possible positive geo target types. Values are wire-format constants."""
        UNSPECIFIED = 0           # Not specified.
        UNKNOWN = 1               # Value unknown in this version.
        DONT_CARE = 2             # Either Area of Interest (AOI) or Location of
                                  # Presence (LOP) may trigger the ad.
        AREA_OF_INTEREST = 3      # Triggered only if the user's AOI matches.
        LOCATION_OF_PRESENCE = 4  # Triggered only if the user's LOP matches.
class PreferredContentTypeEnum(object):
    """Container for the PreferredContentType enum."""

    class PreferredContentType(enum.IntEnum):
        """Preferred content criterion types.

        Wire-format constants; YOUTUBE_TOP_CONTENT is deliberately 400, not
        sequential — do not renumber.
        """
        UNSPECIFIED = 0            # Not specified.
        UNKNOWN = 1                # Value unknown in this version.
        YOUTUBE_TOP_CONTENT = 400  # Top content on YouTube.
class PriceExtensionPriceQualifierEnum(object):
    """Container for the PriceExtensionPriceQualifier enum."""

    class PriceExtensionPriceQualifier(enum.IntEnum):
        """Price extension price qualifiers. Values are wire-format constants."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        FROM = 2         # 'From' qualifier for the price.
        UP_TO = 3        # 'Up to' qualifier for the price.
        AVERAGE = 4      # 'Average' qualifier for the price.
class PriceExtensionPriceUnitEnum(object):
    """Container for the PriceExtensionPriceUnit enum."""

    class PriceExtensionPriceUnit(enum.IntEnum):
        """Price extension price units. Values are wire-format constants."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        PER_HOUR = 2     # Per hour.
        PER_DAY = 3      # Per day.
        PER_WEEK = 4     # Per week.
        PER_MONTH = 5    # Per month.
        PER_YEAR = 6     # Per year.
        PER_NIGHT = 7    # Per night.
class PriceExtensionTypeEnum(object):
    """Container for the PriceExtensionType enum."""

    class PriceExtensionType(enum.IntEnum):
        """Price extension types. Values are wire-format constants."""
        UNSPECIFIED = 0         # Not specified.
        UNKNOWN = 1             # Response-only: value unknown in this version.
        BRANDS = 2              # A list of brands.
        EVENTS = 3              # A list of events.
        LOCATIONS = 4           # Locations relevant to your business.
        NEIGHBORHOODS = 5       # Sub-regions or districts within a city/region.
        PRODUCT_CATEGORIES = 6  # A collection of product categories.
        PRODUCT_TIERS = 7       # A collection of related product tiers.
        SERVICES = 8            # A collection of services offered.
        SERVICE_CATEGORIES = 9  # A collection of service categories.
        SERVICE_TIERS = 10      # A collection of related service tiers.
class PricePlaceholderFieldEnum(object):
    """Container for the PricePlaceholderField enum."""

    class PricePlaceholderField(enum.IntEnum):
        """Possible values for Price placeholder fields.

        Wire-format constants. Feed-level fields occupy 2-6; per-row fields for
        table item N use the N*100 block: HEADER (STRING) = N00,
        DESCRIPTION (STRING) = N01, PRICE (MONEY) = N02, UNIT (STRING,
        must match a predefined price unit) = N03, FINAL_URLS (URL_LIST,
        Upgraded URLs) = N04, FINAL_MOBILE_URLS (URL_LIST) = N05.
        Do not renumber.
        """
        UNSPECIFIED = 0           # Not specified.
        UNKNOWN = 1               # Response-only: value unknown in this version.
        TYPE = 2                  # STRING. Feed type; must match a predefined
                                  # price feed type exactly.
        PRICE_QUALIFIER = 3       # STRING. Qualifier of each price; must match
                                  # a predefined price qualifier exactly.
        TRACKING_TEMPLATE = 4     # URL. Tracking template (Upgraded URLs).
        LANGUAGE = 5              # STRING. Feed language locale code.
        FINAL_URL_SUFFIX = 6      # STRING. Final URL suffix (parallel tracking).
        ITEM_1_HEADER = 100
        ITEM_1_DESCRIPTION = 101
        ITEM_1_PRICE = 102
        ITEM_1_UNIT = 103
        ITEM_1_FINAL_URLS = 104
        ITEM_1_FINAL_MOBILE_URLS = 105
        ITEM_2_HEADER = 200
        ITEM_2_DESCRIPTION = 201
        ITEM_2_PRICE = 202
        ITEM_2_UNIT = 203
        ITEM_2_FINAL_URLS = 204
        ITEM_2_FINAL_MOBILE_URLS = 205
        ITEM_3_HEADER = 300
        ITEM_3_DESCRIPTION = 301
        ITEM_3_PRICE = 302
        ITEM_3_UNIT = 303
        ITEM_3_FINAL_URLS = 304
        ITEM_3_FINAL_MOBILE_URLS = 305
        ITEM_4_HEADER = 400
        ITEM_4_DESCRIPTION = 401
        ITEM_4_PRICE = 402
        ITEM_4_UNIT = 403
        ITEM_4_FINAL_URLS = 404
        ITEM_4_FINAL_MOBILE_URLS = 405
        ITEM_5_HEADER = 500
        ITEM_5_DESCRIPTION = 501
        ITEM_5_PRICE = 502
        ITEM_5_UNIT = 503
        ITEM_5_FINAL_URLS = 504
        ITEM_5_FINAL_MOBILE_URLS = 505
        ITEM_6_HEADER = 600
        ITEM_6_DESCRIPTION = 601
        ITEM_6_PRICE = 602
        ITEM_6_UNIT = 603
        ITEM_6_FINAL_URLS = 604
        ITEM_6_FINAL_MOBILE_URLS = 605
        ITEM_7_HEADER = 700
        ITEM_7_DESCRIPTION = 701
        ITEM_7_PRICE = 702
        ITEM_7_UNIT = 703
        ITEM_7_FINAL_URLS = 704
        ITEM_7_FINAL_MOBILE_URLS = 705
        ITEM_8_HEADER = 800
        ITEM_8_DESCRIPTION = 801
        ITEM_8_PRICE = 802
        ITEM_8_UNIT = 803
        ITEM_8_FINAL_URLS = 804
        ITEM_8_FINAL_MOBILE_URLS = 805
class ProductBiddingCategoryLevelEnum(object):
    """Container for the ProductBiddingCategoryLevel enum."""

    class ProductBiddingCategoryLevel(enum.IntEnum):
        """Level of the product bidding category. Values are wire-format constants."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        LEVEL1 = 2       # Level 1.
        LEVEL2 = 3       # Level 2.
        LEVEL3 = 4       # Level 3.
        LEVEL4 = 5       # Level 4.
        LEVEL5 = 6       # Level 5.
class ProductBiddingCategoryStatusEnum(object):
    """Container for the ProductBiddingCategoryStatus enum."""

    class ProductBiddingCategoryStatus(enum.IntEnum):
        """Status of the product bidding category. Values are wire-format constants."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        ACTIVE = 2       # Active; can be used for bidding.
        OBSOLETE = 3     # Obsolete; used only for reporting purposes.
class ProductChannelEnum(object):
    """Container for the ProductChannel enum."""

    class ProductChannel(enum.IntEnum):
        """Locality of a product offer. Values are wire-format constants."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        ONLINE = 2       # The item is sold online.
        LOCAL = 3        # The item is sold in local stores.
class ProductChannelExclusivityEnum(object):
    """Container for the ProductChannelExclusivity enum."""

    class ProductChannelExclusivity(enum.IntEnum):
        """Availability of a product offer. Values are wire-format constants."""
        UNSPECIFIED = 0     # Not specified.
        UNKNOWN = 1         # Response-only: value unknown in this version.
        SINGLE_CHANNEL = 2  # Sold through one channel only (local stores or
                            # online, per its ProductChannel).
        MULTI_CHANNEL = 3   # Matched to its online/local counterpart; available
                            # for purchase in both ShoppingProductChannels.
class ProductConditionEnum(object):
    """Container for the ProductCondition enum."""

    class ProductCondition(enum.IntEnum):
        """Condition of a product offer.

        Wire-format constants; value 2 is intentionally unused — do not
        renumber to close the gap.
        """
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        NEW = 3          # The product condition is new.
        REFURBISHED = 4  # The product condition is refurbished.
        USED = 5         # The product condition is used.
class ProductTypeLevelEnum(object):
    """Container for the ProductTypeLevel enum."""

    class ProductTypeLevel(enum.IntEnum):
        """Level of the type of a product offer.

        Wire-format constants; levels deliberately start at 7 — do not
        renumber.
        """
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        LEVEL1 = 7       # Level 1.
        LEVEL2 = 8       # Level 2.
        LEVEL3 = 9       # Level 3.
        LEVEL4 = 10      # Level 4.
        LEVEL5 = 11      # Level 5.
class PromotionExtensionDiscountModifierEnum(object):
    """Container for the PromotionExtensionDiscountModifier enum."""

    class PromotionExtensionDiscountModifier(enum.IntEnum):
        """Promotion extension discount modifiers. Values are wire-format constants."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        UP_TO = 2        # 'Up to'.
class PromotionExtensionOccasionEnum(object):
    """Container for the PromotionExtensionOccasion enum."""

    class PromotionExtensionOccasion(enum.IntEnum):
        """Promotion extension occasions (holidays, sales and events).

        Values are wire-format constants; do not renumber.
        """
        UNSPECIFIED = 0       # Not specified.
        UNKNOWN = 1           # Response-only: value unknown in this version.
        NEW_YEARS = 2
        CHINESE_NEW_YEAR = 3
        VALENTINES_DAY = 4
        EASTER = 5
        MOTHERS_DAY = 6
        FATHERS_DAY = 7
        LABOR_DAY = 8
        BACK_TO_SCHOOL = 9
        HALLOWEEN = 10
        BLACK_FRIDAY = 11
        CYBER_MONDAY = 12
        CHRISTMAS = 13
        BOXING_DAY = 14
        INDEPENDENCE_DAY = 15  # Independence Day in any country.
        NATIONAL_DAY = 16      # National Day in any country.
        END_OF_SEASON = 17     # End of any season.
        WINTER_SALE = 18
        SUMMER_SALE = 19
        FALL_SALE = 20
        SPRING_SALE = 21
        RAMADAN = 22
        EID_AL_FITR = 23
        EID_AL_ADHA = 24
        SINGLES_DAY = 25
        WOMENS_DAY = 26
        HOLI = 27
        PARENTS_DAY = 28
        ST_NICHOLAS_DAY = 29
        CARNIVAL = 30
        EPIPHANY = 31          # Also known as Three Kings' Day.
        ROSH_HASHANAH = 32
        PASSOVER = 33
        HANUKKAH = 34
        DIWALI = 35
        NAVRATRI = 36
        SONGKRAN = 37          # Available in Thai: Songkran.
        YEAR_END_GIFT = 38     # Available in Japanese: Year-end Gift.
class PromotionPlaceholderFieldEnum(object):
    """Container for the PromotionPlaceholderField enum."""

    class PromotionPlaceholderField(enum.IntEnum):
        """Possible values for Promotion placeholder fields.

        Values are wire-format constants; do not renumber.
        """
        UNSPECIFIED = 0         # Not specified.
        UNKNOWN = 1             # Response-only: value unknown in this version.
        PROMOTION_TARGET = 2    # STRING. Text shown on the ad with the extension.
        DISCOUNT_MODIFIER = 3   # STRING. Adds an "up to" phrase for variable rates.
        PERCENT_OFF = 4         # INT64. Micros; 1,000,000 micros = 1%.
        MONEY_AMOUNT_OFF = 5    # MONEY. Requires a currency and an amount.
        PROMOTION_CODE = 6      # STRING. Code the user enters for the discount.
        ORDERS_OVER_AMOUNT = 7  # MONEY. Minimum spend to qualify.
        PROMOTION_START = 8     # DATE. Start date of the promotion.
        PROMOTION_END = 9       # DATE. End date of the promotion.
        OCCASION = 10           # STRING. A PromotionExtensionOccasion enum
                                # value, e.g. NEW_YEARS.
        FINAL_URLS = 11         # URL_LIST. Final URLs (Upgraded URLs).
        FINAL_MOBILE_URLS = 12  # URL_LIST. Final mobile URLs (Upgraded URLs).
        TRACKING_URL = 13       # URL. Tracking template (Upgraded URLs).
        LANGUAGE = 14           # STRING. Language code for the promotion.
        FINAL_URL_SUFFIX = 15   # STRING. Final URL suffix (parallel tracking).
class ProximityRadiusUnitsEnum(object):
    """Container for the ProximityRadiusUnits enum."""

    class ProximityRadiusUnits(enum.IntEnum):
        """Unit of radius distance in proximity targeting (e.g. MILES).

        Values are wire-format constants.
        """
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        MILES = 2        # Miles.
        KILOMETERS = 3   # Kilometers.
class QualityScoreBucketEnum(object):
    """Container for the QualityScoreBucket enum."""

    class QualityScoreBucket(enum.IntEnum):
        """Possible quality score buckets. Values are wire-format constants."""
        UNSPECIFIED = 0    # Not specified.
        UNKNOWN = 1        # Response-only: value unknown in this version.
        BELOW_AVERAGE = 2  # Creative quality is below average.
        AVERAGE = 3        # Creative quality is average.
        ABOVE_AVERAGE = 4  # Creative quality is above average.
class QueryErrorEnum(object):
    """Container for the QueryError enum."""

    class QueryError(enum.IntEnum):
        """Possible query (GAQL) parsing and validation errors.

        Values are wire-format constants and are intentionally non-sequential;
        members are listed alphabetically after the two sentinels — do not
        renumber or reorder by value.
        """
        UNSPECIFIED = 0   # Name unspecified.
        UNKNOWN = 1       # Error code unknown in this version.
        QUERY_ERROR = 50  # Catch-all when no other reason applies.
        BAD_ENUM_CONSTANT = 18                  # Condition references an invalid enum constant.
        BAD_ESCAPE_SEQUENCE = 7                 # Invalid escape sequence in query.
        BAD_FIELD_NAME = 12                     # Field name is invalid.
        BAD_LIMIT_VALUE = 15                    # Limit value is invalid (not a number).
        BAD_NUMBER = 5                          # Number cannot be parsed.
        BAD_OPERATOR = 3                        # Invalid operator.
        BAD_RESOURCE_TYPE_IN_FROM_CLAUSE = 45   # Invalid resource type in FROM.
        BAD_SYMBOL = 2                          # Non-ASCII symbol outside of strings.
        BAD_VALUE = 4                           # Value is invalid.
        DATE_RANGE_TOO_WIDE = 36                # Date filter range exceeds 31 days
                                                # (when segmented by date).
        EXPECTED_AND = 30                       # Missing AND between BETWEEN values.
        EXPECTED_BY = 14                        # ORDER BY is missing BY.
        EXPECTED_DIMENSION_FIELD_IN_SELECT_CLAUSE = 37  # No dimension field selected.
        EXPECTED_FILTERS_ON_DATE_RANGE = 55     # Missing filters on date fields.
        EXPECTED_FROM = 44                      # Missing FROM clause.
        EXPECTED_LIST = 41                      # Operator requires a list value.
        EXPECTED_REFERENCED_FIELD_IN_SELECT_CLAUSE = 16  # WHERE/ORDER BY fields
                                                         # missing from SELECT.
        EXPECTED_SELECT = 13                    # SELECT missing at query start.
        EXPECTED_SINGLE_VALUE = 42              # List given where one value expected.
        EXPECTED_VALUE_WITH_BETWEEN_OPERATOR = 29  # Missing BETWEEN value(s).
        INVALID_DATE_FORMAT = 38                # Expected 'YYYY-MM-DD'.
        INVALID_STRING_VALUE = 57               # Value should have been a string.
        INVALID_VALUE_WITH_BETWEEN_OPERATOR = 26  # BETWEEN string not a date.
        INVALID_VALUE_WITH_DURING_OPERATOR = 22   # DURING value not a date range literal.
        INVALID_VALUE_WITH_LIKE_OPERATOR = 56     # Non-string passed to LIKE.
        OPERATOR_FIELD_MISMATCH = 35            # Operator inapplicable to the field.
        PROHIBITED_EMPTY_LIST_IN_CONDITION = 28  # Condition with an empty list.
        PROHIBITED_ENUM_CONSTANT = 54           # Unsupported enum constant referenced.
        PROHIBITED_FIELD_COMBINATION_IN_SELECT_CLAUSE = 31  # Fields not selectable together.
        PROHIBITED_FIELD_IN_ORDER_BY_CLAUSE = 40  # Non-orderable field in ORDER BY.
        PROHIBITED_FIELD_IN_SELECT_CLAUSE = 23    # Non-selectable field in SELECT.
        PROHIBITED_FIELD_IN_WHERE_CLAUSE = 24     # Non-filterable field in WHERE.
        PROHIBITED_RESOURCE_TYPE_IN_FROM_CLAUSE = 43    # FROM resource unsupported here.
        PROHIBITED_RESOURCE_TYPE_IN_SELECT_CLAUSE = 48  # Incompatible resource in SELECT.
        PROHIBITED_RESOURCE_TYPE_IN_WHERE_CLAUSE = 58   # Incompatible resource in WHERE.
        PROHIBITED_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 49   # Incompatible metric.
        PROHIBITED_SEGMENT_IN_SELECT_OR_WHERE_CLAUSE = 51  # Incompatible segment.
        PROHIBITED_SEGMENT_WITH_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 53  # Segment
                                                # incompatible with a metric.
        LIMIT_VALUE_TOO_LOW = 25                # LIMIT value is too low.
        PROHIBITED_NEWLINE_IN_STRING = 8        # Newline character inside a string.
        PROHIBITED_VALUE_COMBINATION_IN_LIST = 10  # List mixes value types.
        PROHIBITED_VALUE_COMBINATION_WITH_BETWEEN_OPERATOR = 21  # BETWEEN values
                                                # not of the same type.
        STRING_NOT_TERMINATED = 6               # Unterminated string.
        TOO_MANY_SEGMENTS = 34                  # Too many segments in SELECT.
        UNEXPECTED_END_OF_QUERY = 9             # Query incomplete; cannot parse.
        UNEXPECTED_FROM_CLAUSE = 47             # FROM not allowed in this query.
        UNRECOGNIZED_FIELD = 32                 # One or more unrecognized fields.
        UNEXPECTED_INPUT = 11                   # Unexpected extra part in query.
        REQUESTED_METRICS_FOR_MANAGER = 59      # Metrics cannot be requested for a
                                                # manager account; query each client
                                                # account separately.
class QuotaErrorEnum(object):
    """Container for the QuotaError enum."""

    class QuotaError(enum.IntEnum):
        """Possible quota errors. Values are wire-format constants."""
        UNSPECIFIED = 0                     # Enum unspecified.
        UNKNOWN = 1                         # Error code unknown in this version.
        RESOURCE_EXHAUSTED = 2              # Too many requests.
        ACCESS_PROHIBITED = 3               # Access is prohibited.
        RESOURCE_TEMPORARILY_EXHAUSTED = 4  # Too many requests in a short time.
class RangeErrorEnum(object):
    """Container for the RangeError enum."""

    class RangeError(enum.IntEnum):
        """Possible range errors. Values are wire-format constants."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code unknown in this version.
        TOO_LOW = 2      # Too low.
        TOO_HIGH = 3     # Too high.
class RealEstatePlaceholderFieldEnum(object):
    """Namespace wrapper for the ``RealEstatePlaceholderField`` enum."""

    class RealEstatePlaceholderField(enum.IntEnum):
        """Fields available in Real Estate placeholder feed items.

        Each member's comment notes the feed attribute's data type and use.
        """
        UNSPECIFIED = 0              # Not specified.
        UNKNOWN = 1                  # Response-only: value unknown in this version.
        LISTING_ID = 2               # STRING. Unique ID.
        LISTING_NAME = 3             # STRING. Main headline shown in the dynamic ad.
        CITY_NAME = 4                # STRING. City name shown in the dynamic ad.
        DESCRIPTION = 5              # STRING. Listing description shown in the dynamic ad.
        ADDRESS = 6                  # STRING. Complete address, including postal code.
        PRICE = 7                    # STRING. Price shown in the ad, e.g. "100.00 USD".
        FORMATTED_PRICE = 8          # STRING. Formatted price, e.g. "Starting at $100.00 USD".
        IMAGE_URL = 9                # URL. Image displayed in the ad.
        PROPERTY_TYPE = 10           # STRING. Property type (house, condo, ...) for grouping.
        LISTING_TYPE = 11            # STRING. Listing type (resale, rental, ...) for grouping.
        CONTEXTUAL_KEYWORDS = 12     # STRING_LIST. Keywords used for product retrieval.
        FINAL_URLS = 13              # URL_LIST. Final URLs (Upgraded URLs); most specific wins.
        FINAL_MOBILE_URLS = 14       # URL_LIST. Final mobile URLs (Upgraded URLs).
        TRACKING_URL = 15            # URL. Tracking template (Upgraded URLs).
        ANDROID_APP_LINK = 16        # STRING. android-app://{package_id}/{scheme}/{host_path}.
        SIMILAR_LISTING_IDS = 17     # STRING_LIST. Recommended listing IDs to show alongside.
        IOS_APP_LINK = 18            # STRING. iOS app link.
        IOS_APP_STORE_ID = 19        # INT64. iOS app store ID.
class RecommendationErrorEnum(object):
    """Namespace wrapper for the ``RecommendationError`` enum."""

    class RecommendationError(enum.IntEnum):
        """Reasons applying or dismissing a recommendation may fail."""
        UNSPECIFIED = 0                       # Enum unspecified.
        UNKNOWN = 1                           # Error code not known in this version.
        BUDGET_AMOUNT_TOO_SMALL = 2           # Budget below minimum currency unit / min CPC.
        BUDGET_AMOUNT_TOO_LARGE = 3           # Budget amount too large.
        INVALID_BUDGET_AMOUNT = 4             # Not a valid amount (e.g. not a unit multiple).
        POLICY_ERROR = 5                      # Keyword or ad violates ad policy.
        INVALID_BID_AMOUNT = 6                # Bad bid (too many fraction digits, negative).
        ADGROUP_KEYWORD_LIMIT = 7             # Ad group keyword count at maximum.
        RECOMMENDATION_ALREADY_APPLIED = 8    # Recommendation was already applied.
        RECOMMENDATION_INVALIDATED = 9        # Recommendation has been invalidated.
        TOO_MANY_OPERATIONS = 10              # Request exceeds max operations.
        NO_OPERATIONS = 11                    # Request contains no operations.
        DIFFERENT_TYPES_NOT_SUPPORTED = 12    # Mixed types need partial-failure mode.
        DUPLICATE_RESOURCE_NAME = 13          # Multiple operations share a resource_name.
        RECOMMENDATION_ALREADY_DISMISSED = 14 # Recommendation was already dismissed.
        INVALID_APPLY_REQUEST = 15            # Apply request was malformed.
class RecommendationTypeEnum(object):
    """Namespace wrapper for the ``RecommendationType`` enum."""

    class RecommendationType(enum.IntEnum):
        """Kinds of optimization recommendations.

        Values 11-13 are intentionally absent; numbering mirrors the proto.
        """
        UNSPECIFIED = 0                  # Not specified.
        UNKNOWN = 1                      # Response-only: value unknown in this version.
        CAMPAIGN_BUDGET = 2              # Budget for budget-constrained campaigns.
        KEYWORD = 3                      # Keyword recommendation.
        TEXT_AD = 4                      # Add a new text ad.
        TARGET_CPA_OPT_IN = 5            # Switch campaign to Target CPA bidding.
        MAXIMIZE_CONVERSIONS_OPT_IN = 6  # Switch campaign to Maximize Conversions bidding.
        ENHANCED_CPC_OPT_IN = 7          # Enable Enhanced CPC for a campaign.
        SEARCH_PARTNERS_OPT_IN = 8       # Show ads on Google Search Partner sites.
        MAXIMIZE_CLICKS_OPT_IN = 9       # Switch campaign to Maximize Clicks bidding.
        OPTIMIZE_AD_ROTATION = 10        # Use "Optimize" ad rotation for the ad group.
        KEYWORD_MATCH_TYPE = 14          # Broaden an existing keyword's match type.
        MOVE_UNUSED_BUDGET = 15          # Move unused budget to a constrained budget.
class RegionCodeErrorEnum(object):
    """Namespace wrapper for the ``RegionCodeError`` enum."""

    class RegionCodeError(enum.IntEnum):
        """Reasons a region code is rejected."""
        UNSPECIFIED = 0          # Enum unspecified.
        UNKNOWN = 1              # Error code not known in this version.
        INVALID_REGION_CODE = 2  # Invalid region code.
class RequestErrorEnum(object):
    """Namespace wrapper for the ``RequestError`` enum."""

    class RequestError(enum.IntEnum):
        """Reasons a request is rejected before execution.

        Values are non-sequential on purpose; they mirror the proto exactly.
        """
        UNSPECIFIED = 0                             # Enum unspecified.
        UNKNOWN = 1                                 # Error code not known in this version.
        RESOURCE_NAME_MISSING = 3                   # Resource name is required.
        RESOURCE_NAME_MALFORMED = 4                 # Resource name is malformed.
        BAD_RESOURCE_ID = 17                        # Resource ID portion is malformed.
        INVALID_CUSTOMER_ID = 16                    # Customer ID is invalid.
        OPERATION_REQUIRED = 5                      # Mutate needs create/update/remove.
        RESOURCE_NOT_FOUND = 6                      # Requested resource not found.
        INVALID_PAGE_TOKEN = 7                      # Page token is invalid.
        EXPIRED_PAGE_TOKEN = 8                      # Page token has expired.
        INVALID_PAGE_SIZE = 22                      # Page size is invalid.
        REQUIRED_FIELD_MISSING = 9                  # A required field is missing.
        IMMUTABLE_FIELD = 11                        # Field is immutable (maybe create-only).
        TOO_MANY_MUTATE_OPERATIONS = 13             # Too many entries in the request.
        CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT = 14  # Not executable by a manager account.
        CANNOT_MODIFY_FOREIGN_FIELD = 15            # Attempted to modify a read-only field.
        INVALID_ENUM_VALUE = 18                     # Enum value is not permitted.
        DEVELOPER_TOKEN_PARAMETER_MISSING = 19      # developer-token parameter required.
        LOGIN_CUSTOMER_ID_PARAMETER_MISSING = 20    # login-customer-id parameter required.
        VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN = 21   # page_token set on validate-only request.
class ResourceAccessDeniedErrorEnum(object):
    """Namespace wrapper for the ``ResourceAccessDeniedError`` enum."""

    class ResourceAccessDeniedError(enum.IntEnum):
        """Reasons access to a resource is denied.

        Note the proto skips value 2; 3 is preserved as-is.
        """
        UNSPECIFIED = 0          # Enum unspecified.
        UNKNOWN = 1              # Error code not known in this version.
        WRITE_ACCESS_DENIED = 3  # User lacked write access.
class ResourceCountLimitExceededErrorEnum(object):
    """Namespace wrapper for the ``ResourceCountLimitExceededError`` enum."""

    class ResourceCountLimitExceededError(enum.IntEnum):
        """Reasons a request would exceed a resource count limit.

        The exact limit is reported via accountLimitType; enclosingId names the
        containing entity where applicable (comma-separated for composites).
        """
        UNSPECIFIED = 0                  # Enum unspecified.
        UNKNOWN = 1                      # Error code not known in this version.
        ACCOUNT_LIMIT = 2                # Would exceed the account-level limit.
        CAMPAIGN_LIMIT = 3               # Would exceed a per-campaign limit.
        ADGROUP_LIMIT = 4                # Would exceed a per-ad-group limit.
        AD_GROUP_AD_LIMIT = 5            # Would exceed a per-ad-group-ad limit.
        AD_GROUP_CRITERION_LIMIT = 6     # Would exceed a per-ad-group-criterion limit.
        SHARED_SET_LIMIT = 7             # Would exceed a per-shared-set limit.
        MATCHING_FUNCTION_LIMIT = 8      # Exceeds a matching-function-related limit.
        RESPONSE_ROW_LIMIT_EXCEEDED = 9  # Response would exceed max returnable rows.
class SearchTermMatchTypeEnum(object):
    """Namespace wrapper for the ``SearchTermMatchType`` enum."""

    class SearchTermMatchType(enum.IntEnum):
        """Match types (including close variants) for a keyword triggering an ad."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        BROAD = 2        # Broad match.
        EXACT = 3        # Exact match.
        PHRASE = 4       # Phrase match.
        NEAR_EXACT = 5   # Exact match, close variant.
        NEAR_PHRASE = 6  # Phrase match, close variant.
class SearchTermTargetingStatusEnum(object):
    """Namespace wrapper for the ``SearchTermTargetingStatus`` enum."""

    class SearchTermTargetingStatus(enum.IntEnum):
        """Whether a search term is targeted, excluded, both, or neither."""
        UNSPECIFIED = 0     # Not specified.
        UNKNOWN = 1         # Response-only: value unknown in this version.
        ADDED = 2           # Added to targeted keywords.
        EXCLUDED = 3        # Matches a negative keyword.
        ADDED_EXCLUDED = 4  # Both added and excluded.
        NONE = 5            # Neither targeted nor excluded.
class ServedAssetFieldTypeEnum(object):
    """Namespace wrapper for the ``ServedAssetFieldType`` enum."""

    class ServedAssetFieldType(enum.IntEnum):
        """Ad fields in which an asset can be served."""
        UNSPECIFIED = 0    # No value specified.
        UNKNOWN = 1        # Response-only: value unknown in this version.
        HEADLINE_1 = 2     # Asset used in headline 1.
        HEADLINE_2 = 3     # Asset used in headline 2.
        HEADLINE_3 = 4     # Asset used in headline 3.
        DESCRIPTION_1 = 5  # Asset used in description 1.
        DESCRIPTION_2 = 6  # Asset used in description 2.
class SettingErrorEnum(object):
    """Namespace wrapper for the ``SettingError`` enum."""

    class SettingError(enum.IntEnum):
        """Reasons a campaign/ad-group setting mutation is rejected.

        Note the proto skips value 2; numbering starts at 3 after UNKNOWN.
        """
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code not known in this version.
        # Setting not available for this Google Ads account.
        SETTING_TYPE_IS_NOT_AVAILABLE = 3
        # Setting not compatible with the campaign.
        SETTING_TYPE_IS_NOT_COMPATIBLE_WITH_CAMPAIGN = 4
        # TargetingSetting contains an invalid CriterionTypeGroup.
        TARGETING_SETTING_CONTAINS_INVALID_CRITERION_TYPE_GROUP = 5
        # Demographic CriterionTypeGroups (AGE_RANGE, GENDER, PARENT,
        # INCOME_RANGE) must not be explicitly set to false.
        TARGETING_SETTING_DEMOGRAPHIC_CRITERION_TYPE_GROUPS_MUST_BE_SET_TO_TARGET_ALL = 6
        # Demographic CriterionTypeGroups cannot be flipped from true to false.
        TARGETING_SETTING_CANNOT_CHANGE_TARGET_ALL_TO_FALSE_FOR_DEMOGRAPHIC_CRITERION_TYPE_GROUP = 7
        # At least one feed id should be present.
        DYNAMIC_SEARCH_ADS_SETTING_AT_LEAST_ONE_FEED_ID_MUST_BE_PRESENT = 8
        # DynamicSearchAdsSetting contains an invalid domain name.
        DYNAMIC_SEARCH_ADS_SETTING_CONTAINS_INVALID_DOMAIN_NAME = 9
        # DynamicSearchAdsSetting contains a subdomain name.
        DYNAMIC_SEARCH_ADS_SETTING_CONTAINS_SUBDOMAIN_NAME = 10
        # DynamicSearchAdsSetting contains an invalid language code.
        DYNAMIC_SEARCH_ADS_SETTING_CONTAINS_INVALID_LANGUAGE_CODE = 11
        # Search campaigns must not set CriterionTypeGroup.PLACEMENT to targetAll.
        TARGET_ALL_IS_NOT_ALLOWED_FOR_PLACEMENT_IN_SEARCH_CAMPAIGN = 12
        # Duplicate description in universal app setting description field.
        UNIVERSAL_APP_CAMPAIGN_SETTING_DUPLICATE_DESCRIPTION = 13
        # Description line too wide in universal app setting description field.
        UNIVERSAL_APP_CAMPAIGN_SETTING_DESCRIPTION_LINE_WIDTH_TOO_LONG = 14
        # appId cannot be modified for COMPLETE campaigns.
        UNIVERSAL_APP_CAMPAIGN_SETTING_APP_ID_CANNOT_BE_MODIFIED = 15
        # YoutubeVideoMediaIds exceed the size limit.
        TOO_MANY_YOUTUBE_MEDIA_IDS_IN_UNIVERSAL_APP_CAMPAIGN = 16
        # ImageMediaIds exceed the size limit.
        TOO_MANY_IMAGE_MEDIA_IDS_IN_UNIVERSAL_APP_CAMPAIGN = 17
        # Media is incompatible for universal app campaign.
        MEDIA_INCOMPATIBLE_FOR_UNIVERSAL_APP_CAMPAIGN = 18
        # Too many exclamation marks in ad text ideas.
        TOO_MANY_EXCLAMATION_MARKS = 19
class SharedCriterionErrorEnum(object):
    """Namespace wrapper for the ``SharedCriterionError`` enum."""

    class SharedCriterionError(enum.IntEnum):
        """Reasons a shared criterion mutation is rejected."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code not known in this version.
        # Criterion not appropriate for the shared set type.
        CRITERION_TYPE_NOT_ALLOWED_FOR_SHARED_SET_TYPE = 2
class SharedSetErrorEnum(object):
    """Namespace wrapper for the ``SharedSetError`` enum."""

    class SharedSetError(enum.IntEnum):
        """Reasons a shared set mutation is rejected."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code not known in this version.
        # Customer cannot create this type of shared set.
        CUSTOMER_CANNOT_CREATE_SHARED_SET_OF_THIS_TYPE = 2
        DUPLICATE_NAME = 3      # A shared set with this name already exists.
        SHARED_SET_REMOVED = 4  # Removed shared sets cannot be mutated.
        SHARED_SET_IN_USE = 5   # Cannot remove: the shared set is in use.
class SharedSetStatusEnum(object):
    """Namespace wrapper for the ``SharedSetStatus`` enum."""

    class SharedSetStatus(enum.IntEnum):
        """Lifecycle states of a shared set."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        ENABLED = 2      # The shared set is enabled.
        REMOVED = 3      # Removed; can no longer be used.
class SharedSetTypeEnum(object):
    """Namespace wrapper for the ``SharedSetType`` enum."""

    class SharedSetType(enum.IntEnum):
        """Kinds of shared sets."""
        UNSPECIFIED = 0          # Not specified.
        UNKNOWN = 1              # Response-only: value unknown in this version.
        NEGATIVE_KEYWORDS = 2    # Keywords excludable from targeting.
        NEGATIVE_PLACEMENTS = 3  # Placements excludable from targeting.
class SitelinkPlaceholderFieldEnum(object):
    """Namespace wrapper for the ``SitelinkPlaceholderField`` enum."""

    class SitelinkPlaceholderField(enum.IntEnum):
        """Fields available in Sitelink placeholder feed items."""
        UNSPECIFIED = 0        # Not specified.
        UNKNOWN = 1            # Response-only: value unknown in this version.
        TEXT = 2               # STRING. Sitelink link text.
        LINE_1 = 3             # STRING. First description line.
        LINE_2 = 4             # STRING. Second description line.
        FINAL_URLS = 5         # URL_LIST. Final URLs (Upgraded URLs).
        FINAL_MOBILE_URLS = 6  # URL_LIST. Final mobile URLs (Upgraded URLs).
        TRACKING_URL = 7       # URL. Tracking template (Upgraded URLs).
        FINAL_URL_SUFFIX = 8   # STRING. Final URL suffix for parallel tracking.
class SlotEnum(object):
    """Namespace wrapper for the ``Slot`` enum."""

    class Slot(enum.IntEnum):
        """Positions an ad can occupy."""
        UNSPECIFIED = 0           # Not specified.
        UNKNOWN = 1               # Value unknown in this version.
        SEARCH_SIDE = 2           # Google search: side.
        SEARCH_TOP = 3            # Google search: top.
        SEARCH_OTHER = 4          # Google search: other.
        CONTENT = 5               # Google Display Network.
        SEARCH_PARTNER_TOP = 6    # Search partners: top.
        SEARCH_PARTNER_OTHER = 7  # Search partners: other.
        MIXED = 8                 # Cross-network.
class SpendingLimitTypeEnum(object):
    """Namespace wrapper for the ``SpendingLimitType`` enum."""

    class SpendingLimitType(enum.IntEnum):
        """Symbolic spending limits used instead of absolute micro amounts."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        INFINITE = 2     # Unlimited spending power.
class StringFormatErrorEnum(object):
    """Namespace wrapper for the ``StringFormatError`` enum."""

    class StringFormatError(enum.IntEnum):
        """Reasons a string value fails format validation."""
        UNSPECIFIED = 0     # Enum unspecified.
        UNKNOWN = 1         # Error code not known in this version.
        ILLEGAL_CHARS = 2   # Input contains disallowed characters.
        INVALID_FORMAT = 3  # Input invalid for the associated field.
class StringLengthErrorEnum(object):
    """Namespace wrapper for the ``StringLengthError`` enum."""

    class StringLengthError(enum.IntEnum):
        """Reasons a string value fails length validation."""
        UNSPECIFIED = 0  # Enum unspecified.
        UNKNOWN = 1      # Error code not known in this version.
        TOO_SHORT = 2    # String is too short.
        TOO_LONG = 3     # String is too long.
class StructuredSnippetPlaceholderFieldEnum(object):
    """Namespace wrapper for the ``StructuredSnippetPlaceholderField`` enum."""

    class StructuredSnippetPlaceholderField(enum.IntEnum):
        """Fields available in Structured Snippet placeholder feed items."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        # STRING. Snippet category; must exactly match a predefined header.
        # See https://developers.google.com/adwords/api
        # /docs/appendix/structured-snippet-headers
        HEADER = 2
        # STRING_LIST. Family-safe values describing products/services; ASCII
        # only, at most 25 characters per snippet.
        SNIPPETS = 3
class SystemManagedResourceSourceEnum(object):
    """Namespace wrapper for the ``SystemManagedResourceSource`` enum."""

    class SystemManagedResourceSource(enum.IntEnum):
        """Origins of system-managed entities."""
        UNSPECIFIED = 0    # Not specified.
        UNKNOWN = 1        # Response-only: value unknown in this version.
        AD_VARIATIONS = 2  # Generated ad-variations experiment ad.
class TargetCpaOptInRecommendationGoalEnum(object):
    """Namespace wrapper for the ``TargetCpaOptInRecommendationGoal`` enum."""

    class TargetCpaOptInRecommendationGoal(enum.IntEnum):
        """Goals for a TargetCpaOptIn recommendation."""
        UNSPECIFIED = 0       # Not specified.
        UNKNOWN = 1           # Response-only: value unknown in this version.
        SAME_COST = 2         # Set Target CPA to maintain the same cost.
        SAME_CONVERSIONS = 3  # Set Target CPA to maintain the same conversions.
        SAME_CPA = 4          # Set Target CPA to maintain the same CPA.
        # Set Target CPA as close as possible to — yet below — the actual CPA
        # computed over the past 28 days.
        CLOSEST_CPA = 5
class TargetImpressionShareLocationEnum(object):
    """Namespace wrapper for the ``TargetImpressionShareLocation`` enum."""

    class TargetImpressionShareLocation(enum.IntEnum):
        """Page locations an impression-share goal can target."""
        UNSPECIFIED = 0           # Not specified.
        UNKNOWN = 1               # Response-only: value unknown in this version.
        ANYWHERE_ON_PAGE = 2      # Any location on the web page.
        TOP_OF_PAGE = 3           # Top box of ads.
        ABSOLUTE_TOP_OF_PAGE = 4  # Top slot in the top box of ads.
class TargetingDimensionEnum(object):
    """Namespace wrapper for the ``TargetingDimension`` enum."""

    class TargetingDimension(enum.IntEnum):
        """Dimensions along which ads can be targeted."""
        UNSPECIFIED = 0      # Not specified.
        UNKNOWN = 1          # Response-only: value unknown in this version.
        # Keyword criteria (e.g. 'mars cruise'); may be a custom bid dimension
        # but never a target-"ALL" dimension in a TargetRestriction.
        KEYWORD = 2
        # User list, user interest, custom affinity, custom in-market.
        AUDIENCE = 3
        # Content categories (e.g. 'category::Animals>Pets'); Display/Video.
        TOPIC = 4
        GENDER = 5           # Gender targeting criteria.
        AGE_RANGE = 6        # Age-range targeting criteria.
        # Websites, mobile apps/app categories, YouTube videos and channels.
        PLACEMENT = 7
        PARENTAL_STATUS = 8  # Parental-status targeting criteria.
        INCOME_RANGE = 9     # Income-range targeting criteria.
class TimeTypeEnum(object):
    """Namespace wrapper for the ``TimeType`` enum."""

    class TimeType(enum.IntEnum):
        """Symbolic times used instead of absolute timestamps."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        NOW = 2          # As soon as possible.
        FOREVER = 3      # An infinite point in the future.
class TrackingCodePageFormatEnum(object):
    """Namespace wrapper for the ``TrackingCodePageFormat`` enum."""

    class TrackingCodePageFormat(enum.IntEnum):
        """Web page formats for installing the tracking tag and snippet."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        HTML = 2         # Standard HTML page format.
        AMP = 3          # Google AMP page format.
class TrackingCodeTypeEnum(object):
    """Namespace wrapper for the ``TrackingCodeType`` enum."""

    class TrackingCodeType(enum.IntEnum):
        """Kinds of generated conversion-tracking tag snippets."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Response-only: value unknown in this version.
        WEBPAGE = 2      # Fired when a website page loads.
        # JavaScript function fired from an onClick handler on a link/button.
        WEBPAGE_ONCLICK = 3
        # Mobile webpage embed; JavaScript function fires the tag.
        CLICK_TO_CALL = 4
class TravelPlaceholderFieldEnum(object):
    """Namespace wrapper for the ``TravelPlaceholderField`` enum."""

    class TravelPlaceholderField(enum.IntEnum):
        """Fields available in Travel placeholder feed items.

        DESTINATION_ID alone (or DESTINATION_ID + ORIGIN_ID together when both
        are present) must form a unique key per offer.
        """
        UNSPECIFIED = 0               # Not specified.
        UNKNOWN = 1                   # Response-only: value unknown in this version.
        DESTINATION_ID = 2            # STRING. Required. Destination id, e.g. PAR, LON.
        ORIGIN_ID = 3                 # STRING. Origin id, e.g. PAR, LON.
        TITLE = 4                     # STRING. Required. Main headline of the dynamic ad.
        DESTINATION_NAME = 5          # STRING. Destination name; shorter is better.
        ORIGIN_NAME = 6               # STRING. Origin name; shorter is better.
        PRICE = 7                     # STRING. Price shown in the ad, e.g. "100.00 USD".
        FORMATTED_PRICE = 8           # STRING. e.g. "Starting at $100.00 USD", "$80 - $100".
        SALE_PRICE = 9                # STRING. Sale price, e.g. "80.00 USD".
        FORMATTED_SALE_PRICE = 10     # STRING. e.g. "On sale for $80.00", "$60 - $80".
        IMAGE_URL = 11                # URL. Image displayed in the ad.
        CATEGORY = 12                 # STRING. Offer category used for grouping.
        CONTEXTUAL_KEYWORDS = 13      # STRING_LIST. Keywords for product retrieval.
        DESTINATION_ADDRESS = 14      # STRING. Offer address, including postal code.
        FINAL_URL = 15                # URL_LIST. Required. Final URLs (Upgraded URLs).
        FINAL_MOBILE_URLS = 16        # URL_LIST. Final mobile URLs (Upgraded URLs).
        TRACKING_URL = 17             # URL. Tracking template (Upgraded URLs).
        ANDROID_APP_LINK = 18         # STRING. android-app://{package_id}/{scheme}/{host_path}.
        SIMILAR_DESTINATION_IDS = 19  # STRING_LIST. Recommended destination IDs.
        IOS_APP_LINK = 20             # STRING. iOS app link.
        IOS_APP_STORE_ID = 21         # INT64. iOS app store ID.
class UrlFieldErrorEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UrlFieldError(enum.IntEnum):
        """
        Enum describing possible url field errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          INVALID_TRACKING_URL_TEMPLATE (int): The tracking url template is invalid.
          INVALID_TAG_IN_TRACKING_URL_TEMPLATE (int): The tracking url template contains invalid tag.
          MISSING_TRACKING_URL_TEMPLATE_TAG (int): The tracking url template must contain at least one tag (e.g. {lpurl}),
            This applies only to tracking url template associated with website ads or
            product ads.
          MISSING_PROTOCOL_IN_TRACKING_URL_TEMPLATE (int): The tracking url template must start with a valid protocol (or lpurl
            tag).
          INVALID_PROTOCOL_IN_TRACKING_URL_TEMPLATE (int): The tracking url template starts with an invalid protocol.
          MALFORMED_TRACKING_URL_TEMPLATE (int): The tracking url template contains illegal characters.
          MISSING_HOST_IN_TRACKING_URL_TEMPLATE (int): The tracking url template must contain a host name (or lpurl tag).
          INVALID_TLD_IN_TRACKING_URL_TEMPLATE (int): The tracking url template has an invalid or missing top level domain
            extension.
          REDUNDANT_NESTED_TRACKING_URL_TEMPLATE_TAG (int): The tracking url template contains nested occurrences of the same
            conditional tag (i.e. {ifmobile:{ifmobile:x}}).
          INVALID_FINAL_URL (int): The final url is invalid.
          INVALID_TAG_IN_FINAL_URL (int): The final url contains invalid tag.
          REDUNDANT_NESTED_FINAL_URL_TAG (int): The final url contains nested occurrences of the same conditional tag
            (i.e. {ifmobile:{ifmobile:x}}).
          MISSING_PROTOCOL_IN_FINAL_URL (int): The final url must start with a valid protocol.
          INVALID_PROTOCOL_IN_FINAL_URL (int): The final url starts with an invalid protocol.
          MALFORMED_FINAL_URL (int): The final url contains illegal characters.
          MISSING_HOST_IN_FINAL_URL (int): The final url must contain a host name.
          INVALID_TLD_IN_FINAL_URL (int): The final url has an invalid or missing top level domain
            extension.
          INVALID_FINAL_MOBILE_URL (int): The final mobile url is invalid.
          INVALID_TAG_IN_FINAL_MOBILE_URL (int): The final mobile url contains invalid tag.
          REDUNDANT_NESTED_FINAL_MOBILE_URL_TAG (int): The final mobile url contains nested occurrences of the same conditional
            tag (i.e. {ifmobile:{ifmobile:x}}).
          MISSING_PROTOCOL_IN_FINAL_MOBILE_URL (int): The final mobile url must start with a valid protocol.
          INVALID_PROTOCOL_IN_FINAL_MOBILE_URL (int): The final mobile url starts with an invalid protocol.
          MALFORMED_FINAL_MOBILE_URL (int): The final mobile url contains illegal characters.
          MISSING_HOST_IN_FINAL_MOBILE_URL (int): The final mobile url must contain a host name.
          INVALID_TLD_IN_FINAL_MOBILE_URL (int): The final mobile url has an invalid or missing top level domain
            extension.
          INVALID_FINAL_APP_URL (int): The final app url is invalid.
          INVALID_TAG_IN_FINAL_APP_URL (int): The final app url contains invalid tag.
          REDUNDANT_NESTED_FINAL_APP_URL_TAG (int): The final app url contains nested occurrences of the same conditional tag
            (i.e. {ifmobile:{ifmobile:x}}).
          MULTIPLE_APP_URLS_FOR_OSTYPE (int): More than one app url found for the same OS type.
          INVALID_OSTYPE (int): The OS type given for an app url is not valid.
          INVALID_PROTOCOL_FOR_APP_URL (int): The protocol given for an app url is not valid. (E.g. "android-app://")
          INVALID_PACKAGE_ID_FOR_APP_URL (int): The package id (app id) given for an app url is not valid.
          URL_CUSTOM_PARAMETERS_COUNT_EXCEEDS_LIMIT (int): The number of url custom parameters for an resource exceeds the maximum
            limit allowed.
          INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_KEY (int): An invalid character appears in the parameter key.
          INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_VALUE (int): An invalid character appears in the parameter value.
          INVALID_TAG_IN_URL_CUSTOM_PARAMETER_VALUE (int): The url custom parameter value fails url tag validation.
          REDUNDANT_NESTED_URL_CUSTOM_PARAMETER_TAG (int): The custom parameter contains nested occurrences of the same conditional
            tag (i.e. {ifmobile:{ifmobile:x}}).
          MISSING_PROTOCOL (int): The protocol (http:// or https://) is missing.
          INVALID_PROTOCOL (int): Unsupported protocol in URL. Only http and https are supported.
          INVALID_URL (int): The url is invalid.
          DESTINATION_URL_DEPRECATED (int): Destination Url is deprecated.
          INVALID_TAG_IN_URL (int): The url contains invalid tag.
          MISSING_URL_TAG (int): The url must contain at least one tag (e.g. {lpurl}), This applies only
            to urls associated with website ads or product ads.
          DUPLICATE_URL_ID (int): Duplicate url id.
          INVALID_URL_ID (int): Invalid url id.
          FINAL_URL_SUFFIX_MALFORMED (int): The final url suffix cannot begin with '?' or '&' characters and must be
            a valid query string.
          INVALID_TAG_IN_FINAL_URL_SUFFIX (int): The final url suffix cannot contain {lpurl} related or {ignore} tags.
          INVALID_TOP_LEVEL_DOMAIN (int): The top level domain is invalid, e.g, not a public top level domain
            listed in publicsuffix.org.
          MALFORMED_TOP_LEVEL_DOMAIN (int): Malformed top level domain in URL.
          MALFORMED_URL (int): Malformed URL.
          MISSING_HOST (int): No host found in URL.
        """
        # NOTE: values are the proto wire numbers and are intentionally
        # non-contiguous (e.g. 34 -> 39, 43 -> 52); do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        INVALID_TRACKING_URL_TEMPLATE = 2
        INVALID_TAG_IN_TRACKING_URL_TEMPLATE = 3
        MISSING_TRACKING_URL_TEMPLATE_TAG = 4
        MISSING_PROTOCOL_IN_TRACKING_URL_TEMPLATE = 5
        INVALID_PROTOCOL_IN_TRACKING_URL_TEMPLATE = 6
        MALFORMED_TRACKING_URL_TEMPLATE = 7
        MISSING_HOST_IN_TRACKING_URL_TEMPLATE = 8
        INVALID_TLD_IN_TRACKING_URL_TEMPLATE = 9
        REDUNDANT_NESTED_TRACKING_URL_TEMPLATE_TAG = 10
        INVALID_FINAL_URL = 11
        INVALID_TAG_IN_FINAL_URL = 12
        REDUNDANT_NESTED_FINAL_URL_TAG = 13
        MISSING_PROTOCOL_IN_FINAL_URL = 14
        INVALID_PROTOCOL_IN_FINAL_URL = 15
        MALFORMED_FINAL_URL = 16
        MISSING_HOST_IN_FINAL_URL = 17
        INVALID_TLD_IN_FINAL_URL = 18
        INVALID_FINAL_MOBILE_URL = 19
        INVALID_TAG_IN_FINAL_MOBILE_URL = 20
        REDUNDANT_NESTED_FINAL_MOBILE_URL_TAG = 21
        MISSING_PROTOCOL_IN_FINAL_MOBILE_URL = 22
        INVALID_PROTOCOL_IN_FINAL_MOBILE_URL = 23
        MALFORMED_FINAL_MOBILE_URL = 24
        MISSING_HOST_IN_FINAL_MOBILE_URL = 25
        INVALID_TLD_IN_FINAL_MOBILE_URL = 26
        INVALID_FINAL_APP_URL = 27
        INVALID_TAG_IN_FINAL_APP_URL = 28
        REDUNDANT_NESTED_FINAL_APP_URL_TAG = 29
        MULTIPLE_APP_URLS_FOR_OSTYPE = 30
        INVALID_OSTYPE = 31
        INVALID_PROTOCOL_FOR_APP_URL = 32
        INVALID_PACKAGE_ID_FOR_APP_URL = 33
        URL_CUSTOM_PARAMETERS_COUNT_EXCEEDS_LIMIT = 34
        INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_KEY = 39
        INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_VALUE = 40
        INVALID_TAG_IN_URL_CUSTOM_PARAMETER_VALUE = 41
        REDUNDANT_NESTED_URL_CUSTOM_PARAMETER_TAG = 42
        MISSING_PROTOCOL = 43
        INVALID_PROTOCOL = 52
        INVALID_URL = 44
        DESTINATION_URL_DEPRECATED = 45
        INVALID_TAG_IN_URL = 46
        MISSING_URL_TAG = 47
        DUPLICATE_URL_ID = 48
        INVALID_URL_ID = 49
        FINAL_URL_SUFFIX_MALFORMED = 50
        INVALID_TAG_IN_FINAL_URL_SUFFIX = 51
        INVALID_TOP_LEVEL_DOMAIN = 53
        MALFORMED_TOP_LEVEL_DOMAIN = 54
        MALFORMED_URL = 55
        MISSING_HOST = 56
class UserInterestTaxonomyTypeEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserInterestTaxonomyType(enum.IntEnum):
        """
        Enum containing the possible UserInterestTaxonomyTypes.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          AFFINITY (int): The affinity for this user interest.
          IN_MARKET (int): The market for this user interest.
          MOBILE_APP_INSTALL_USER (int): Users known to have installed applications in the specified categories.
          VERTICAL_GEO (int): The geographical location of the interest-based vertical.
          NEW_SMART_PHONE_USER (int): User interest criteria for new smart phone users.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        AFFINITY = 2
        IN_MARKET = 3
        MOBILE_APP_INSTALL_USER = 4
        VERTICAL_GEO = 5
        NEW_SMART_PHONE_USER = 6
class UserListAccessStatusEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListAccessStatus(enum.IntEnum):
        """
        Enum containing possible user list access statuses.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          ENABLED (int): The access is enabled.
          DISABLED (int): The access is disabled.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENABLED = 2
        DISABLED = 3
class UserListClosingReasonEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListClosingReason(enum.IntEnum):
        """
        Enum describing possible user list closing reasons.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          UNUSED (int): The userlist was closed because of not being used for over one year.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        UNUSED = 2
class UserListCombinedRuleOperatorEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListCombinedRuleOperator(enum.IntEnum):
        """
        Enum describing possible user list combined rule operators.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          AND (int): A AND B.
          AND_NOT (int): A AND NOT B.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        AND = 2
        AND_NOT = 3
class UserListCrmDataSourceTypeEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListCrmDataSourceType(enum.IntEnum):
        """
        Enum describing possible user list crm data source type.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          FIRST_PARTY (int): The uploaded data is first-party data.
          THIRD_PARTY_CREDIT_BUREAU (int): The uploaded data is from a third-party credit bureau.
          THIRD_PARTY_VOTER_FILE (int): The uploaded data is from a third-party voter file.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        FIRST_PARTY = 2
        THIRD_PARTY_CREDIT_BUREAU = 3
        THIRD_PARTY_VOTER_FILE = 4
class UserListDateRuleItemOperatorEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListDateRuleItemOperator(enum.IntEnum):
        """
        Enum describing possible user list date rule item operators.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          EQUALS (int): Equals.
          NOT_EQUALS (int): Not Equals.
          BEFORE (int): Before.
          AFTER (int): After.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        EQUALS = 2
        NOT_EQUALS = 3
        BEFORE = 4
        AFTER = 5
class UserListErrorEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListError(enum.IntEnum):
        """
        Enum describing possible user list errors.

        Attributes:
          UNSPECIFIED (int): Enum unspecified.
          UNKNOWN (int): The received error code is not known in this version.
          EXTERNAL_REMARKETING_USER_LIST_MUTATE_NOT_SUPPORTED (int): Creating and updating external remarketing user lists is not supported.
          CONCRETE_TYPE_REQUIRED (int): Concrete type of user list is required.
          CONVERSION_TYPE_ID_REQUIRED (int): Creating/updating user list conversion types requires specifying the
            conversion type Id.
          DUPLICATE_CONVERSION_TYPES (int): Remarketing user list cannot have duplicate conversion types.
          INVALID_CONVERSION_TYPE (int): Conversion type is invalid/unknown.
          INVALID_DESCRIPTION (int): User list description is empty or invalid.
          INVALID_NAME (int): User list name is empty or invalid.
          INVALID_TYPE (int): Type of the UserList does not match.
          CAN_NOT_ADD_LOGICAL_LIST_AS_LOGICAL_LIST_OPERAND (int): Embedded logical user lists are not allowed.
          INVALID_USER_LIST_LOGICAL_RULE_OPERAND (int): User list rule operand is invalid.
          NAME_ALREADY_USED (int): Name is already being used for another user list for the account.
          NEW_CONVERSION_TYPE_NAME_REQUIRED (int): Name is required when creating a new conversion type.
          CONVERSION_TYPE_NAME_ALREADY_USED (int): The given conversion type name has been used.
          OWNERSHIP_REQUIRED_FOR_SET (int): Only an owner account may edit a user list.
          USER_LIST_MUTATE_NOT_SUPPORTED (int): Creating user list without setting type in oneof user\_list field, or
            creating/updating read-only user list types is not allowed.
          INVALID_RULE (int): Rule is invalid.
          INVALID_DATE_RANGE (int): The specified date range is empty.
          CAN_NOT_MUTATE_SENSITIVE_USERLIST (int): A UserList which is privacy sensitive or legal rejected cannot be mutated
            by external users.
          MAX_NUM_RULEBASED_USERLISTS (int): Maximum number of rulebased user lists a customer can have.
          CANNOT_MODIFY_BILLABLE_RECORD_COUNT (int): BasicUserList's billable record field cannot be modified once it is set.
          APP_ID_NOT_SET (int): crm\_based\_user\_list.app\_id field must be set when upload\_key\_type
            is MOBILE\_ADVERTISING\_ID.
          USERLIST_NAME_IS_RESERVED_FOR_SYSTEM_LIST (int): Name of the user list is reserved for system generated lists and cannot
            be used.
          ADVERTISER_NOT_WHITELISTED_FOR_USING_UPLOADED_DATA (int): Advertiser needs to be whitelisted to use remarketing lists created from
            advertiser uploaded data (e.g., Customer Match lists).
          RULE_TYPE_IS_NOT_SUPPORTED (int): The provided rule\_type is not supported for the user list.
          CAN_NOT_ADD_A_SIMILAR_USERLIST_AS_LOGICAL_LIST_OPERAND (int): Similar user list cannot be used as a logical user list operand.
          CAN_NOT_MIX_CRM_BASED_IN_LOGICAL_LIST_WITH_OTHER_LISTS (int): Logical user list should not have a mix of CRM based user list and other
            types of lists in its rules.
        """
        # NOTE: values jump from 17 to 27; they mirror the proto wire numbers
        # and must not be renumbered.
        UNSPECIFIED = 0
        UNKNOWN = 1
        EXTERNAL_REMARKETING_USER_LIST_MUTATE_NOT_SUPPORTED = 2
        CONCRETE_TYPE_REQUIRED = 3
        CONVERSION_TYPE_ID_REQUIRED = 4
        DUPLICATE_CONVERSION_TYPES = 5
        INVALID_CONVERSION_TYPE = 6
        INVALID_DESCRIPTION = 7
        INVALID_NAME = 8
        INVALID_TYPE = 9
        CAN_NOT_ADD_LOGICAL_LIST_AS_LOGICAL_LIST_OPERAND = 10
        INVALID_USER_LIST_LOGICAL_RULE_OPERAND = 11
        NAME_ALREADY_USED = 12
        NEW_CONVERSION_TYPE_NAME_REQUIRED = 13
        CONVERSION_TYPE_NAME_ALREADY_USED = 14
        OWNERSHIP_REQUIRED_FOR_SET = 15
        USER_LIST_MUTATE_NOT_SUPPORTED = 16
        INVALID_RULE = 17
        INVALID_DATE_RANGE = 27
        CAN_NOT_MUTATE_SENSITIVE_USERLIST = 28
        MAX_NUM_RULEBASED_USERLISTS = 29
        CANNOT_MODIFY_BILLABLE_RECORD_COUNT = 30
        APP_ID_NOT_SET = 31
        USERLIST_NAME_IS_RESERVED_FOR_SYSTEM_LIST = 32
        ADVERTISER_NOT_WHITELISTED_FOR_USING_UPLOADED_DATA = 33
        RULE_TYPE_IS_NOT_SUPPORTED = 34
        CAN_NOT_ADD_A_SIMILAR_USERLIST_AS_LOGICAL_LIST_OPERAND = 35
        CAN_NOT_MIX_CRM_BASED_IN_LOGICAL_LIST_WITH_OTHER_LISTS = 36
class UserListLogicalRuleOperatorEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListLogicalRuleOperator(enum.IntEnum):
        """
        Enum describing possible user list logical rule operators.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          ALL (int): And - all of the operands.
          ANY (int): Or - at least one of the operands.
          NONE (int): Not - none of the operands.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        ALL = 2
        ANY = 3
        NONE = 4
class UserListMembershipStatusEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListMembershipStatus(enum.IntEnum):
        """
        Enum containing possible user list membership statuses.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          OPEN (int): Open status - List is accruing members and can be targeted to.
          CLOSED (int): Closed status - No new members being added. Cannot be used for targeting.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        OPEN = 2
        CLOSED = 3
class UserListNumberRuleItemOperatorEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListNumberRuleItemOperator(enum.IntEnum):
        """
        Enum describing possible user list number rule item operators.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          GREATER_THAN (int): Greater than.
          GREATER_THAN_OR_EQUAL (int): Greater than or equal.
          EQUALS (int): Equals.
          NOT_EQUALS (int): Not equals.
          LESS_THAN (int): Less than.
          LESS_THAN_OR_EQUAL (int): Less than or equal.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        GREATER_THAN = 2
        GREATER_THAN_OR_EQUAL = 3
        EQUALS = 4
        NOT_EQUALS = 5
        LESS_THAN = 6
        LESS_THAN_OR_EQUAL = 7
class UserListPrepopulationStatusEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListPrepopulationStatus(enum.IntEnum):
        """
        Enum describing possible user list prepopulation status.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          REQUESTED (int): Prepopoulation is being requested.
          FINISHED (int): Prepopulation is finished.
          FAILED (int): Prepopulation failed.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        REQUESTED = 2
        FINISHED = 3
        FAILED = 4
class UserListRuleTypeEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListRuleType(enum.IntEnum):
        """
        Enum describing possible user list rule types.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          AND_OF_ORS (int): Conjunctive normal form.
          OR_OF_ANDS (int): Disjunctive normal form.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        AND_OF_ORS = 2
        OR_OF_ANDS = 3
class UserListSizeRangeEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListSizeRange(enum.IntEnum):
        """
        Enum containing possible user list size ranges.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          LESS_THAN_FIVE_HUNDRED (int): User list has less than 500 users.
          LESS_THAN_ONE_THOUSAND (int): User list has number of users in range of 500 to 1000.
          ONE_THOUSAND_TO_TEN_THOUSAND (int): User list has number of users in range of 1000 to 10000.
          TEN_THOUSAND_TO_FIFTY_THOUSAND (int): User list has number of users in range of 10000 to 50000.
          FIFTY_THOUSAND_TO_ONE_HUNDRED_THOUSAND (int): User list has number of users in range of 50000 to 100000.
          ONE_HUNDRED_THOUSAND_TO_THREE_HUNDRED_THOUSAND (int): User list has number of users in range of 100000 to 300000.
          THREE_HUNDRED_THOUSAND_TO_FIVE_HUNDRED_THOUSAND (int): User list has number of users in range of 300000 to 500000.
          FIVE_HUNDRED_THOUSAND_TO_ONE_MILLION (int): User list has number of users in range of 500000 to 1 million.
          ONE_MILLION_TO_TWO_MILLION (int): User list has number of users in range of 1 to 2 millions.
          TWO_MILLION_TO_THREE_MILLION (int): User list has number of users in range of 2 to 3 millions.
          THREE_MILLION_TO_FIVE_MILLION (int): User list has number of users in range of 3 to 5 millions.
          FIVE_MILLION_TO_TEN_MILLION (int): User list has number of users in range of 5 to 10 millions.
          TEN_MILLION_TO_TWENTY_MILLION (int): User list has number of users in range of 10 to 20 millions.
          TWENTY_MILLION_TO_THIRTY_MILLION (int): User list has number of users in range of 20 to 30 millions.
          THIRTY_MILLION_TO_FIFTY_MILLION (int): User list has number of users in range of 30 to 50 millions.
          OVER_FIFTY_MILLION (int): User list has over 50 million users.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        LESS_THAN_FIVE_HUNDRED = 2
        LESS_THAN_ONE_THOUSAND = 3
        ONE_THOUSAND_TO_TEN_THOUSAND = 4
        TEN_THOUSAND_TO_FIFTY_THOUSAND = 5
        FIFTY_THOUSAND_TO_ONE_HUNDRED_THOUSAND = 6
        ONE_HUNDRED_THOUSAND_TO_THREE_HUNDRED_THOUSAND = 7
        THREE_HUNDRED_THOUSAND_TO_FIVE_HUNDRED_THOUSAND = 8
        FIVE_HUNDRED_THOUSAND_TO_ONE_MILLION = 9
        ONE_MILLION_TO_TWO_MILLION = 10
        TWO_MILLION_TO_THREE_MILLION = 11
        THREE_MILLION_TO_FIVE_MILLION = 12
        FIVE_MILLION_TO_TEN_MILLION = 13
        TEN_MILLION_TO_TWENTY_MILLION = 14
        TWENTY_MILLION_TO_THIRTY_MILLION = 15
        THIRTY_MILLION_TO_FIFTY_MILLION = 16
        OVER_FIFTY_MILLION = 17
class UserListStringRuleItemOperatorEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListStringRuleItemOperator(enum.IntEnum):
        """
        Enum describing possible user list string rule item operators.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          CONTAINS (int): Contains.
          EQUALS (int): Equals.
          STARTS_WITH (int): Starts with.
          ENDS_WITH (int): Ends with.
          NOT_EQUALS (int): Not equals.
          NOT_CONTAINS (int): Not contains.
          NOT_STARTS_WITH (int): Not starts with.
          NOT_ENDS_WITH (int): Not ends with.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CONTAINS = 2
        EQUALS = 3
        STARTS_WITH = 4
        ENDS_WITH = 5
        NOT_EQUALS = 6
        NOT_CONTAINS = 7
        NOT_STARTS_WITH = 8
        NOT_ENDS_WITH = 9
class UserListTypeEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class UserListType(enum.IntEnum):
        """
        Enum containing possible user list types.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          REMARKETING (int): UserList represented as a collection of conversion types.
          LOGICAL (int): UserList represented as a combination of other user lists/interests.
          EXTERNAL_REMARKETING (int): UserList created in the Google Ad Manager platform.
          RULE_BASED (int): UserList associated with a rule.
          SIMILAR (int): UserList with users similar to users of another UserList.
          CRM_BASED (int): UserList of first-party CRM data provided by advertiser in the form of
            emails or other formats.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        REMARKETING = 2
        LOGICAL = 3
        EXTERNAL_REMARKETING = 4
        RULE_BASED = 5
        SIMILAR = 6
        CRM_BASED = 7
class VanityPharmaDisplayUrlModeEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class VanityPharmaDisplayUrlMode(enum.IntEnum):
        """
        Enum describing possible display modes for vanity pharma URLs.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          MANUFACTURER_WEBSITE_URL (int): Replace vanity pharma URL with manufacturer website url.
          WEBSITE_DESCRIPTION (int): Replace vanity pharma URL with description of the website.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        MANUFACTURER_WEBSITE_URL = 2
        WEBSITE_DESCRIPTION = 3
class VanityPharmaTextEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class VanityPharmaText(enum.IntEnum):
        """
        Enum describing possible text.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          PRESCRIPTION_TREATMENT_WEBSITE_EN (int): Prescription treatment website with website content in English.
          PRESCRIPTION_TREATMENT_WEBSITE_ES (int): Prescription treatment website with website content in Spanish
            (Sitio de tratamientos con receta).
          PRESCRIPTION_DEVICE_WEBSITE_EN (int): Prescription device website with website content in English.
          PRESCRIPTION_DEVICE_WEBSITE_ES (int): Prescription device website with website content in Spanish (Sitio de
            dispositivos con receta).
          MEDICAL_DEVICE_WEBSITE_EN (int): Medical device website with website content in English.
          MEDICAL_DEVICE_WEBSITE_ES (int): Medical device website with website content in Spanish (Sitio de
            dispositivos mรฉdicos).
          PREVENTATIVE_TREATMENT_WEBSITE_EN (int): Preventative treatment website with website content in English.
          PREVENTATIVE_TREATMENT_WEBSITE_ES (int): Preventative treatment website with website content in Spanish (Sitio de
            tratamientos preventivos).
          PRESCRIPTION_CONTRACEPTION_WEBSITE_EN (int): Prescription contraception website with website content in English.
          PRESCRIPTION_CONTRACEPTION_WEBSITE_ES (int): Prescription contraception website with website content in Spanish (Sitio
            de anticonceptivos con receta).
          PRESCRIPTION_VACCINE_WEBSITE_EN (int): Prescription vaccine website with website content in English.
          PRESCRIPTION_VACCINE_WEBSITE_ES (int): Prescription vaccine website with website content in Spanish (Sitio de
            vacunas con receta).
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        PRESCRIPTION_TREATMENT_WEBSITE_EN = 2
        PRESCRIPTION_TREATMENT_WEBSITE_ES = 3
        PRESCRIPTION_DEVICE_WEBSITE_EN = 4
        PRESCRIPTION_DEVICE_WEBSITE_ES = 5
        MEDICAL_DEVICE_WEBSITE_EN = 6
        MEDICAL_DEVICE_WEBSITE_ES = 7
        PREVENTATIVE_TREATMENT_WEBSITE_EN = 8
        PREVENTATIVE_TREATMENT_WEBSITE_ES = 9
        PRESCRIPTION_CONTRACEPTION_WEBSITE_EN = 10
        PRESCRIPTION_CONTRACEPTION_WEBSITE_ES = 11
        PRESCRIPTION_VACCINE_WEBSITE_EN = 12
        PRESCRIPTION_VACCINE_WEBSITE_ES = 13
class WebpageConditionOperandEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class WebpageConditionOperand(enum.IntEnum):
        """
        The webpage condition operand in webpage criterion.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          URL (int): Operand denoting a webpage URL targeting condition.
          CATEGORY (int): Operand denoting a webpage category targeting condition.
          PAGE_TITLE (int): Operand denoting a webpage title targeting condition.
          PAGE_CONTENT (int): Operand denoting a webpage content targeting condition.
          CUSTOM_LABEL (int): Operand denoting a webpage custom label targeting condition.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        URL = 2
        CATEGORY = 3
        PAGE_TITLE = 4
        PAGE_CONTENT = 5
        CUSTOM_LABEL = 6
class WebpageConditionOperatorEnum(object):
    # Auto-generated wrapper: the outer class namespaces the proto enum.
    class WebpageConditionOperator(enum.IntEnum):
        """
        The webpage condition operator in webpage criterion.

        Attributes:
          UNSPECIFIED (int): Not specified.
          UNKNOWN (int): Used for return value only. Represents value unknown in this version.
          EQUALS (int): The argument web condition is equal to the compared web condition.
          CONTAINS (int): The argument web condition is part of the compared web condition.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        EQUALS = 2
        CONTAINS = 3
|
"""
Module for managing a sensor via KNX.
It provides functionality for
* reading the current state from KNX bus.
* watching for state updates from KNX bus.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Iterator
from xknx.remote_value import (
GroupAddressesType,
RemoteValue,
RemoteValueControl,
RemoteValueSensor,
)
from .device import Device, DeviceCallbackType
if TYPE_CHECKING:
from xknx.telegram import Telegram
from xknx.xknx import XKNX
class Sensor(Device):
    """Class for managing a sensor.

    A sensor only listens to its state group address; it never actively
    writes values to the bus.
    """

    def __init__(
        self,
        xknx: XKNX,
        name: str,
        group_address_state: GroupAddressesType | None = None,
        sync_state: bool | int | float | str = True,
        always_callback: bool = False,
        value_type: int | str | None = None,
        device_updated_cb: DeviceCallbackType[Sensor] | None = None,
    ):
        """Initialize Sensor class.

        :param xknx: XKNX instance this device is attached to.
        :param name: Human readable device name.
        :param group_address_state: Group address(es) to read the state from.
        :param sync_state: Sync-state configuration forwarded to the RemoteValue.
        :param always_callback: If True, GroupValueWrite telegrams trigger the
            callback even when the payload did not change.
        :param value_type: DPT value type or one of the control-mode names.
        :param device_updated_cb: Callback invoked after state updates.
        """
        super().__init__(xknx, name, device_updated_cb)
        self.sensor_value: RemoteValueControl | RemoteValueSensor
        # Control-mode value types need RemoteValueControl; every other
        # value_type is decoded by a plain RemoteValueSensor.
        if isinstance(value_type, str) and value_type in (
            "stepwise_dimming",
            "stepwise_blinds",
            "startstop_dimming",
            "startstop_blinds",
        ):
            self.sensor_value = RemoteValueControl(
                xknx,
                group_address_state=group_address_state,
                sync_state=sync_state,
                value_type=value_type,
                device_name=self.name,
                after_update_cb=self.after_update,
            )
        else:
            self.sensor_value = RemoteValueSensor(
                xknx,
                group_address_state=group_address_state,
                sync_state=sync_state,
                value_type=value_type,
                device_name=self.name,
                after_update_cb=self.after_update,
            )
        self.always_callback = always_callback

    def _iter_remote_values(self) -> Iterator[RemoteValue[Any, Any]]:
        """Iterate the devices RemoteValue classes."""
        yield self.sensor_value

    @property
    def last_telegram(self) -> Telegram | None:
        """Return the last telegram received from the RemoteValue."""
        return self.sensor_value.telegram

    async def process_group_write(self, telegram: Telegram) -> None:
        """Process incoming and outgoing GROUP WRITE telegram."""
        # always_callback forces the device callback even for unchanged payloads.
        await self.sensor_value.process(telegram, always_callback=self.always_callback)

    async def process_group_response(self, telegram: Telegram) -> None:
        """Process incoming GroupValueResponse telegrams."""
        # Responses to our own GroupValueRead never force the callback.
        await self.sensor_value.process(telegram)

    def unit_of_measurement(self) -> str | None:
        """Return the unit of measurement."""
        return self.sensor_value.unit_of_measurement

    def ha_device_class(self) -> str | None:
        """Return the home assistant device class as string."""
        return self.sensor_value.ha_device_class

    def resolve_state(self) -> Any | None:
        """Return the current state of the sensor as a human readable string."""
        return self.sensor_value.value

    def __str__(self) -> str:
        """Return object as readable string."""
        # `!r` instead of calling `.__repr__()` directly; quoted value keeps
        # string states distinguishable from numbers.
        return (
            f'<Sensor name="{self.name}" '
            f"sensor={self.sensor_value.group_addr_str()} "
            f"value={self.resolve_state()!r} "
            f'unit="{self.unit_of_measurement()}"/>'
        )
|
#!/usr/bin/python
"""Linear topology with one computer, one router and one client"""
import time
import experiment
import topo
class LinearQuic(experiment.Experiment):
    """One edge controller in the leftmost host followed by an edge computer, an edge router and an edge client in the rightmost one"""

    def __init__(self, **kwargs):
        experiment.Experiment.__init__(self, **kwargs)
        # Encode the swept parameters into a unique id so that output files
        # of different runs do not overwrite one another.
        self.experiment_id = '{}.d.{}.s.{}.e.{}'.format(
            self.confopts['bw'],
            self.confopts['delay'],
            self.confopts['size'],
            self.confopts['experiment']
        )

    def setupEdgeResources(self):
        """Set up edge router and computers"""
        # one edgecomputer on hosts[1]
        self.servers = [self.hosts[1]]
        # one edegrouter on hosts[2]
        self.routers = [self.hosts[2]]
        print "Starting edge router"
        # NOTE: the trailing space in each conf string is required — it
        # separates the conf from the next CLI token when concatenated below.
        if self.confopts['experiment'] == 'grpc':
            router_server_conf = 'grpc,persistence=0.05 '
        elif self.confopts['experiment'] == 'quic':
            router_server_conf = 'quic,attempt-early-data=false '
        else:  # quic-0rtt
            router_server_conf = 'quic,attempt-early-data=true '
        router_cmd = (
            '{} --server-conf type={}').format(
            self.edgeRouterCmd(
                ip=self.hosts[2].IP(),
                num_threads=5,
                num_pending_clients=5),
            router_server_conf)
        self.cmd.run(
            self.hosts[2],
            router_cmd,
            is_background=True)
        # give the router a moment to bind before starting the computer
        time.sleep(1)
        print "Starting edge computers"
        lambda_name = 'clambda0'
        conf_file_name = 'computer.json'
        self.makeComputerJson(
            conf_file_name,
            [lambda_name],
            speed=float(1e12),
            num_cores=2,
            op_coeff=1e4,
            op_offset=0,
        )
        computer_server_conf = router_server_conf if self.confopts[
            'experiment'] == 'grpc' else 'quic '  # attempt early data not needed for edgecomputer (only server side)
        computer_conf = (
            '../bin/edgecomputer '
            '--conf type=file,path={} '
            '--num-thread 10 '
            '--server-conf type={}'
            '{}').format(
            conf_file_name,
            computer_server_conf,
            self.controller_conf)
        # assign the computer's end-points
        lambda_proc_endpoint = '{}:10000'.format(self.hosts[1].IP())
        util_collector_endpoint = '{}:20000'.format(self.hosts[1].IP())
        self.cmd.run(
            self.hosts[1],
            '{} --server-endpoint {} --utilization {}'.format(
                computer_conf,
                lambda_proc_endpoint,
                util_collector_endpoint),
            is_background=True)
        # register both end-points so teardown/collection can find them
        if self.hosts[1] not in self.computers:
            self.computers[self.hosts[1]] = []
        self.computers[self.hosts[1]].append(
            [lambda_proc_endpoint, util_collector_endpoint])

    def createTraffic(self):
        "Create lambda request clients"
        # one edgeclient on host[3]
        self.clients = [self.hosts[3]]
        print 'Client: {}'.format(self.clients)
        print "Starting traffic"
        pids = []
        counter = 0
        if(self.confopts['experiment'] == 'grpc'):
            client_conf = 'grpc,persistence=0.05'
        elif(self.confopts['experiment'] == 'quic'):
            client_conf = 'quic,attempt-early-data=false'
        else:
            client_conf = 'quic,attempt-early-data=true'
        server_endpoint = '{}:{}'.format(
            self.hosts[1].IP(),  # computer IP
            10000)  # computer port
        # seed is derived from the run seed and the client counter so that
        # every client draws an independent random stream
        pids.append(self.cmd.run(
            self.hosts[3],
            ('../bin/edgeclient '
             '--server-endpoint {} '
             '--client-conf type={} '
             '--num-threads 5 '
             '--inter-request-time 0.1 '
             '--lambda clambda0 '
             '--duration {} '
             '--sizes {} '
             '--seed {} '
             '--output-file {}').format(
                server_endpoint,
                client_conf,
                self.confopts['duration'],
                self.confopts['size'],
                1000 * self.confopts['seed'] + counter,
                'results/out.{}.{}'.format(self.experiment_id, counter)),
            is_background=True))
        counter += 1
        print "Waiting for the experiment to complete"
        # the client terminates on its own after --duration seconds;
        # block until its PID exits instead of sleeping a fixed time
        pids[0][0].cmd('wait', pids[0][1])
if __name__ == '__main__':
    # Experiment-specific options (name: [type, default, help]) are layered
    # on top of overridden common defaults (second dict).
    args = experiment.Experiment.makeArgumentParser(
        {
            'experiment': [
                str,
                'grpc',
                'experiment type, one of: grpc, quic, quic0rtt'],
            'bw': [
                float,
                100,
                'link bandwidth, one of: 0.01, 0.1, 1, 10, 100, 1000'
            ],
            'delay': [
                str,
                '100us',
                'link delays, one of: 1us, 100us, 10ms'
            ],
            'size': [
                int,
                1000,
                'lambda request input size, one of: 100, 1000, 10000'
            ]
        },
        {
            'cpu_limit': -1.0,
            'nat': False,
            'controller': 'flat',
            'balancing': 'rr-async',
            'edgetype': 'router',
            'tclink': 'wotc'
        })
    experiment = LinearQuic(**vars(args))
    experiment.runExperiment(
        topo.LinearTopo(
            4,  # hosts
            # linkargs
            dict(
                bw=experiment.confopts['bw'],  # swept link bandwidth
                delay=experiment.confopts['delay'],  # swept link delay
                loss=0,
                max_queue_size=1000,
                use_htb=True),
            dict()))  # hostargs
|
from rest_framework import serializers
from api.src.MH.MHModel import MH
class MHSerializer(serializers.ModelSerializer):
    """Model serializer exposing every field of the MH model."""

    class Meta:
        model = MH
        fields = '__all__'
|
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import logging
import operator
from mopidy import compat, exceptions, models
from mopidy.compat import urllib
from mopidy.internal import validation
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def _backend_error_handling(backend, reraise=None):
    """Context manager that logs backend failures instead of propagating them.

    Validation errors from the backend are logged as errors; any other
    exception is logged with a traceback and suppressed, unless its type is
    listed in ``reraise``, in which case it is re-raised to the caller.
    """
    try:
        yield
    except exceptions.ValidationError as exc:
        backend_name = backend.actor_ref.actor_class.__name__
        logger.error('%s backend returned bad data: %s', backend_name, exc)
    except Exception as exc:
        if reraise and isinstance(exc, reraise):
            raise
        backend_name = backend.actor_ref.actor_class.__name__
        logger.exception('%s backend caused an exception.', backend_name)
class LibraryController(object):
    """Routes library requests to the backends and merges their results.

    Each public method fans the request out to the relevant backends via
    futures, validates what comes back, and merges it. Backend failures
    are logged and skipped through ``_backend_error_handling`` so a
    single misbehaving backend cannot break the whole library API.
    """

    pykka_traversable = True

    def __init__(self, backends, core):
        self.backends = backends
        self.core = core

    def _get_backend(self, uri):
        # Route by URI scheme; None when no backend serves the scheme.
        uri_scheme = urllib.parse.urlparse(uri).scheme
        return self.backends.with_library.get(uri_scheme, None)

    def _get_backends_to_uris(self, uris):
        # Group URIs by owning backend. When no URIs are given, every
        # library-capable backend maps to None ("no URI restriction").
        if uris:
            backends_to_uris = collections.defaultdict(list)
            for uri in uris:
                backend = self._get_backend(uri)
                if backend is not None:
                    backends_to_uris[backend].append(uri)
        else:
            backends_to_uris = {
                b: None for b in self.backends.with_library.values()}
        return backends_to_uris

    def browse(self, uri):
        """
        Browse directories and tracks at the given ``uri``.

        ``uri`` is a string which represents some directory belonging to a
        backend. To get the intial root directories for backends pass
        :class:`None` as the URI.

        Returns a list of :class:`mopidy.models.Ref` objects for the
        directories and tracks at the given ``uri``.

        The :class:`~mopidy.models.Ref` objects representing tracks keep the
        track's original URI. A matching pair of objects can look like this::

            Track(uri='dummy:/foo.mp3', name='foo', artists=..., album=...)
            Ref.track(uri='dummy:/foo.mp3', name='foo')

        The :class:`~mopidy.models.Ref` objects representing directories have
        backend specific URIs. These are opaque values, so no one but the
        backend that created them should try and derive any meaning from them.
        The only valid exception to this is checking the scheme, as it is used
        to route browse requests to the correct backend.

        For example, the dummy library's ``/bar`` directory could be returned
        like this::

            Ref.directory(uri='dummy:directory:/bar', name='bar')

        :param string uri: URI to browse
        :rtype: list of :class:`mopidy.models.Ref`

        .. versionadded:: 0.18
        """
        if uri is None:
            return self._roots()
        elif not uri.strip():
            # Whitespace-only URIs are treated as "nothing to browse".
            return []
        validation.check_uri(uri)
        return self._browse(uri)

    def _roots(self):
        # Collect each browse-capable backend's root directory, sorted by
        # name for a stable presentation order.
        directories = set()
        backends = self.backends.with_library_browse.values()
        futures = {b: b.library.root_directory for b in backends}
        for backend, future in futures.items():
            with _backend_error_handling(backend):
                root = future.get()
                validation.check_instance(root, models.Ref)
                directories.add(root)
        return sorted(directories, key=operator.attrgetter('name'))

    def _browse(self, uri):
        scheme = urllib.parse.urlparse(uri).scheme
        backend = self.backends.with_library_browse.get(scheme)
        if not backend:
            return []
        with _backend_error_handling(backend):
            result = backend.library.browse(uri).get()
            validation.check_instances(result, models.Ref)
            return result
        # Reached only when the context manager swallowed an exception.
        return []

    def get_distinct(self, field, query=None):
        """
        List distinct values for a given field from the library.

        This has mainly been added to support the list commands the MPD
        protocol supports in a more sane fashion. Other frontends are not
        recommended to use this method.

        :param string field: One of ``track``, ``artist``, ``albumartist``,
            ``album``, ``composer``, ``performer``, ``date`` or ``genre``.
        :param dict query: Query to use for limiting results, see
            :meth:`search` for details about the query format.
        :rtype: set of values corresponding to the requested field type.

        .. versionadded:: 1.0
        """
        validation.check_choice(field, validation.DISTINCT_FIELDS)
        query is None or validation.check_query(query)  # TODO: normalize?
        result = set()
        futures = {b: b.library.get_distinct(field, query)
                   for b in self.backends.with_library.values()}
        for backend, future in futures.items():
            with _backend_error_handling(backend):
                values = future.get()
                if values is not None:
                    validation.check_instances(values, compat.text_type)
                    result.update(values)
        return result

    def get_images(self, uris):
        """Lookup the images for the given URIs

        Backends can use this to return image URIs for any URI they know about
        be it tracks, albums, playlists. The lookup result is a dictionary
        mapping the provided URIs to lists of images.

        Unknown URIs or URIs the corresponding backend couldn't find anything
        for will simply return an empty list for that URI.

        :param uris: list of URIs to find images for
        :type uris: list of string
        :rtype: {uri: tuple of :class:`mopidy.models.Image`}

        .. versionadded:: 1.0
        """
        validation.check_uris(uris)
        futures = {
            backend: backend.library.get_images(backend_uris)
            for (backend, backend_uris)
            in self._get_backends_to_uris(uris).items() if backend_uris}
        results = {uri: tuple() for uri in uris}
        for backend, future in futures.items():
            with _backend_error_handling(backend):
                # Fetch the future's value once instead of three times;
                # caching it in a local is cheaper and clearer.
                backend_result = future.get()
                if backend_result is None:
                    continue
                # NOTE(review): collections.Mapping moved to
                # collections.abc in Python 3.3 and was removed in 3.10 —
                # confirm against the supported Python versions.
                validation.check_instance(backend_result, collections.Mapping)
                for uri, images in backend_result.items():
                    if uri not in uris:
                        raise exceptions.ValidationError(
                            'Got unknown image URI: %s' % uri)
                    validation.check_instances(images, models.Image)
                    results[uri] += tuple(images)
        return results

    def lookup(self, uris):
        """
        Lookup the given URIs.

        If the URI expands to multiple tracks, the returned list will contain
        them all.

        :param uris: track URIs
        :type uris: list of string
        :rtype: {uri: list of :class:`mopidy.models.Track`}
        """
        validation.check_uris(uris)
        futures = {}
        results = {u: [] for u in uris}
        # TODO: lookup(uris) to backend APIs
        for backend, backend_uris in self._get_backends_to_uris(uris).items():
            if backend_uris:
                for u in backend_uris:
                    futures[(backend, u)] = backend.library.lookup(u)
        for (backend, u), future in futures.items():
            with _backend_error_handling(backend):
                result = future.get()
                if result is not None:
                    validation.check_instances(result, models.Track)
                    # TODO Consider making Track.uri field mandatory, and
                    # then remove this filtering of tracks without URIs.
                    results[u] = [r for r in result if r.uri]
        return results

    def refresh(self, uri=None):
        """
        Refresh library. Limit to URI and below if an URI is given.

        :param uri: directory or track URI
        :type uri: string
        """
        uri is None or validation.check_uri(uri)
        futures = {}
        backends = {}
        uri_scheme = urllib.parse.urlparse(uri).scheme if uri else None
        # A backend may serve several schemes; group them so each backend
        # is refreshed at most once.
        for backend_scheme, backend in self.backends.with_library.items():
            backends.setdefault(backend, set()).add(backend_scheme)
        for backend, backend_schemes in backends.items():
            if uri_scheme is None or uri_scheme in backend_schemes:
                futures[backend] = backend.library.refresh(uri)
        for backend, future in futures.items():
            with _backend_error_handling(backend):
                future.get()

    def search(self, query, uris=None, exact=False):
        """
        Search the library for tracks where ``field`` contains ``values``.

        ``field`` can be one of ``uri``, ``track_name``, ``album``, ``artist``,
        ``albumartist``, ``composer``, ``performer``, ``track_no``, ``genre``,
        ``date``, ``comment``, or ``any``.

        If ``uris`` is given, the search is limited to results from within the
        URI roots. For example passing ``uris=['file:']`` will limit the search
        to the local backend.

        Examples::

            # Returns results matching 'a' in any backend
            search({'any': ['a']})

            # Returns results matching artist 'xyz' in any backend
            search({'artist': ['xyz']})

            # Returns results matching 'a' and 'b' and artist 'xyz' in any
            # backend
            search({'any': ['a', 'b'], 'artist': ['xyz']})

            # Returns results matching 'a' if within the given URI roots
            # "file:///media/music" and "spotify:"
            search({'any': ['a']}, uris=['file:///media/music', 'spotify:'])

            # Returns results matching artist 'xyz' and 'abc' in any backend
            search({'artist': ['xyz', 'abc']})

        :param query: one or more queries to search for
        :type query: dict
        :param uris: zero or more URI roots to limit the search to
        :type uris: list of string or :class:`None`
        :param exact: if the search should use exact matching
        :type exact: :class:`bool`
        :rtype: list of :class:`mopidy.models.SearchResult`

        .. versionadded:: 1.0
            The ``exact`` keyword argument.
        """
        query = _normalize_query(query)
        uris is None or validation.check_uris(uris)
        validation.check_query(query)
        validation.check_boolean(exact)
        if not query:
            return []
        futures = {}
        for backend, backend_uris in self._get_backends_to_uris(uris).items():
            futures[backend] = backend.library.search(
                query=query, uris=backend_uris, exact=exact)
        # Some of our tests check for LookupError to catch bad queries. This is
        # silly and should be replaced with query validation before passing it
        # to the backends.
        reraise = (TypeError, LookupError)
        results = []
        for backend, future in futures.items():
            try:
                with _backend_error_handling(backend, reraise=reraise):
                    result = future.get()
                    if result is not None:
                        validation.check_instance(result, models.SearchResult)
                        results.append(result)
            except TypeError:
                # Old backends take no ``exact`` argument; warn, don't crash.
                backend_name = backend.actor_ref.actor_class.__name__
                logger.warning(
                    '%s does not implement library.search() with "exact" '
                    'support. Please upgrade it.', backend_name)
        return results
def _normalize_query(query):
    """Coerce bare string query values into single-element lists.

    Mutates and returns ``query``; logs a warning when a client sent a
    plain string value or an empty query.
    """
    saw_plain_string = False
    # TODO: this breaks if query is not a dictionary like object...
    for (field, values) in query.items():
        if isinstance(values, compat.string_types):
            saw_plain_string = True
            query[field] = [values]
    if saw_plain_string:
        logger.warning(
            'A client or frontend made a broken library search. Values in '
            'queries must be lists of strings, not a string. Please check what'
            ' sent this query and file a bug. Query: %s', query)
    if not query:
        logger.warning(
            'A client or frontend made a library search with an empty query. '
            'This is strongly discouraged. Please check what sent this query '
            'and file a bug.')
    return query
|
# Copyright 2001 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Martel based parser to read NBRF formatted files.
This is a huge regular expression for NBRF, built using
the 'regular expressions on steroids' capabilities of Martel.
http://www-nbrf.georgetown.edu/pirwww/pirhome.shtml
Notes:
Just so I remember -- the new end of line syntax is:
New regexp syntax - \R
\R means "\n|\r\n?"
[\R] means "[\n\r]"
This helps us have endlines be consistent across platforms.
"""
from Bio.Seq import Seq
from Bio.NBRF.ValSeq import valid_sequence_dict
"""Hold NBRF data in a straightforward format.
classes:
o Record - All of the information in an NBRF record.
"""
class Record:
    """Hold NBRF information in a format similar to the original record.

    The Record class is meant to make data easy to get to when you are
    just interested in looking at NBRF data.

    Attributes:
    sequence_type
    sequence_name
    comment
    sequence
    """

    def __init__(self):
        self.sequence_type = ''
        self.sequence_name = ''
        self.comment = ''
        self.sequence = Seq('')

    def __str__(self):
        # Assemble the record as a list of fragments, then join once.
        fragments = [
            'Sequence type %s\n' % valid_sequence_dict[self.sequence_type],
            'Sequence name %s\n' % self.sequence_name,
            '%s\n' % self.comment,
            out_sequence(self.sequence.data),
        ]
        return ''.join(fragments)
def out_sequence(seq):
    """Format a raw sequence string into 80-character lines.

    :param seq: the sequence string to wrap
    :return: the wrapped sequence, each line (and the whole output)
        terminated by a newline, plus a trailing blank line
    """
    # Join once instead of repeated string concatenation, which is
    # quadratic in the worst case.
    lines = ['%s\n' % seq[j: j + 80] for j in range(0, len(seq), 80)]
    return ''.join(lines) + '\n'
|
# -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='ua1k04-wgcowt1lbn1m@stld#9w4#f5xf80=_5#f9=0!%14c*6')
# Mail settings
# ------------------------------------------------------------------------------
# Defaults target a local debugging SMTP server; the console backend just
# prints outgoing mail to stdout unless DJANGO_EMAIL_BACKEND overrides it.
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
# In-memory cache only; suitable for a single local process.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
# (the toolbar only renders for requests from INTERNAL_IPS, so the
# container gateway address — host IP with last octet replaced by .1 —
# must be whitelisted)
if os.environ.get('USE_DOCKER') == 'yes':
    ip = socket.gethostbyname(socket.gethostname())
    INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
ALLOWED_HOSTS = ['127.0.0.1', '192.168.0.100']
|
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.Config import ModifierChain,Modifier
class Eras (object):
    """
    Dummy container for all the cms.Modifier instances that config fragments
    can use to selectively configure depending on what scenario is active.
    """
    def __init__(self):
        # Public era names; each maps to a cff fragment
        # Configuration.Eras.Era_<name>_cff exporting an object of the
        # same name, attached to this instance by addEra() below.
        allEras=['Run1_pA',
                 'Run1_peripheralPbPb',
                 'Run2_50ns',
                 'Run2_50ns_HIPM',
                 'Run2_25ns',
                 'Run2_25ns_HIPM',
                 'Run2_25ns_peripheralPbPb',
                 'Run2_HI',
                 'Run2_2016',
                 'Run2_2016_HIPM',
                 'Run2_2016_trackingLowPU',
                 'Run2_2016_pA',
                 'Run2_2017',
                 'Run2_2017_FastSim', #new modifier for Phase1 FastSim, skips the muon GEM sequence
                 'Run2_2017_trackingRun2',
                 'Run2_2017_trackingLowPU',
                 'Run2_2017_pp_on_XeXe',
                 'Run2_2017_ppRef',
                 'Run2_2018',
                 'Run2_2018_FastSim', #new modifier for Phase1 FastSim, skips the muon GEM sequence
                 'Run2_2018_pp_on_AA',
                 'Run2_2018_pp_on_AA_noHCALmitigation',
                 'Run2_2018_highBetaStar',
                 'Run3',
                 'Run3_pp_on_PbPb',
                 'Run3_dd4hep',
                 'Phase2',
                 'Phase2C9',
                 'Phase2C10',
                 'Phase2C11',
                 'Phase2C12',
                 'Phase2C9_dd4hep',
                 'Phase2C10_dd4hep',
                 'Phase2C11_dd4hep',
                 'Phase2C11I13',
                 'Phase2C11T22',
                 'Phase2C11T23',
                 'Phase2C12_dd4hep',
                 'Phase2C11M9',
                 'Phase2C11I13M9',
                 'Phase2C11I13T22M9',
                 'Phase2C11I13T23M9',
                 'Phase2C11I13T25M9',
                 'Phase2C11I13T26M9'
                 ]
        # Modifiers intended for internal use by the eras above; loaded
        # from Configuration.Eras.Modifier_<name>_cff fragments.
        internalUseMods = ['run2_common', 'run2_25ns_specific',
                           'run2_50ns_specific', 'run2_HI_specific',
                           'stage1L1Trigger', 'fastSim',
                           'peripheralPbPb', 'pA_2016',
                           'run2_HE_2017', 'stage2L1Trigger', 'stage2L1Trigger_2017', 'stage2L1Trigger_2018', 'stage2L1Trigger_2021',
                           'run2_HF_2017', 'run2_HCAL_2017', 'run2_HEPlan1_2017', 'run2_HB_2018','run2_HE_2018',
                           'run3_HB', 'run3_HFSL', 'run3_common', 'run3_RPC',
                           'phase1Pixel', 'run3_GEM', 'run2_GEM_2017',
                           'run2_CSC_2018',
                           'phase2_common', 'phase2_tracker',
                           'phase2_muon', 'phase2_GEM', 'phase2_GE0',
                           'phase2_hgcal', 'phase2_timing', 'phase2_hfnose', 'phase2_hgcalV10', 'phase2_hgcalV11', 'phase2_hgcalV12',
                           'phase2_timing_layer', 'phase2_etlV4', 'phase2_hcal', 'phase2_ecal','phase2_ecal_devel',
                           'phase2_trigger',
                           'phase2_squarePixels', 'phase2_3DPixels',
                           'trackingLowPU', 'trackingPhase1', 'ctpps', 'ctpps_2016', 'ctpps_2017', 'ctpps_2018', 'ctpps_2021', 'trackingPhase2PU140','highBetaStar_2018',
                           'tracker_apv_vfp30_2016', 'pf_badHcalMitigationOff', 'run2_miniAOD_80XLegacy','run2_miniAOD_94XFall17', 'run2_nanoAOD_92X',
                           'run2_nanoAOD_94XMiniAODv1', 'run2_nanoAOD_94XMiniAODv2', 'run2_nanoAOD_94X2016',
                           'run2_miniAOD_devel', 'run2_nanoAOD_102Xv1', 'run2_nanoAOD_106Xv1', 'run2_nanoAOD_106Xv2', 'run3_nanoAOD_devel',
                           'hcalHardcodeConditions', 'hcalSkipPacker',
                           'run2_HLTconditions_2016','run2_HLTconditions_2017','run2_HLTconditions_2018',
                           'bParking']
        # Modifier chains loaded from ModifierChain_<name>_cff fragments.
        internalUseModChains = ['run2_2017_noTrackingModifier']

        # Maps each loaded name to the python import line that would
        # reproduce it in a generated configuration file.
        self.pythonCfgLines = {}

        # Dynamically import each era/modifier/chain fragment and attach
        # the exported object as an attribute of this container.
        for e in allEras:
            eObj=getattr(__import__('Configuration.Eras.Era_'+e+'_cff',globals(),locals(),[e],0),e)
            self.addEra(e,eObj)
            self.pythonCfgLines[e] = 'from Configuration.Eras.Era_'+e+'_cff import '+e
        for e in internalUseMods:
            eObj=getattr(__import__('Configuration.Eras.Modifier_'+e+'_cff',globals(),locals(),[e],0),e)
            self.addEra(e,eObj)
            self.pythonCfgLines[e] = 'from Configuration.Eras.Modifier_'+e+'_cff import '+e
        for e in internalUseModChains:
            eObj=getattr(__import__('Configuration.Eras.ModifierChain_'+e+'_cff',globals(),locals(),[e],0),e)
            self.addEra(e,eObj)
            self.pythonCfgLines[e] = 'from Configuration.Eras.ModifierChain_'+e+'_cff import '+e

    def addEra(self,name,obj):
        # Attach the era/modifier object as an attribute named after it.
        setattr(self,name,obj)

    def inspectModifier(self,m,details):
        # Print the modifier's internal process-modifier list.
        print('  ',m.__dict__ ['_Modifier__processModifiers'])

    def inspectEra(self,e,details):
        # Print a summary (and optionally each modifier) of one era.
        print('\nEra:',e)
        print(' isChosen:',getattr(self,e)._isChosen())
        if details: print(' Modifiers:')
        nmod=0
        for value in getattr(self,e).__dict__['_ModifierChain__chain']:
            if isinstance(value, Modifier):
                nmod=nmod+1
                if details: self.inspectModifier(value,details)
        print(' ',nmod,'modifiers defined')

    def inspect(self,name=None,onlyChosen=False,details=True):
        # Print all known eras, or just the named one.
        # NOTE(review): this calls getattr(self,e).isChosen() while
        # inspectEra() above uses _isChosen() — confirm which spelling the
        # installed ModifierChain API provides; one of the two may raise
        # AttributeError.
        if name==None:
            print('Inspecting the known eras', end=' ')
            if onlyChosen: print(' (all active)')
            else: print('(all eras defined)')
        else:
            print('Inspecting the '+name+' era', end=' ')
        allEras=[]
        for key, value in self.__dict__.items():
            if isinstance(value, ModifierChain): allEras.append(key)
        for e in allEras:
            if name is not None and name==e:
                self.inspectEra(e,details)
            if name is None:
                if not onlyChosen or getattr(self,e).isChosen():
                    self.inspectEra(e,details)
# Module-level singleton: importing this module loads every era fragment.
eras=Eras()
#eras.inspect()
|
"""
Description:
A technique for detecting anomalies in seasonal univariate time
series where the input is a series of <timestamp, count> pairs.
Usage:
anomaly_detect_ts(x, max_anoms=0.1, direction="pos", alpha=0.05, only_last=None,
threshold="None", e_value=False, longterm=False, piecewise_median_period_weeks=2,
plot=False, y_log=False, xlabel="", ylabel="count", title=None, verbose=False)
Arguments:
x: Time series as a two column data frame where the first column
consists of the timestamps and the second column consists of
the observations.
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a
percentage of the data.
direction: Directionality of the anomalies to be detected. Options are:
"pos" | "neg" | "both".
alpha: The level of statistical significance with which to accept or
reject anomalies.
only_last: Find and report anomalies only within the last day or hr in
the time series. None | "day" | "hr".
threshold: Only report positive going anoms above the threshold
specified. Options are: None | "med_max" | "p95" |
"p99".
e_value: Add an additional column to the anoms output containing the
expected value.
longterm: Increase anom detection efficacy for time series that are
greater than a month. See Details below.
piecewise_median_period_weeks: The piecewise median time window as
described in Vallis, Hochenbaum, and Kejariwal (2014).
Defaults to 2.
plot: A flag indicating if a plot with both the time series and the
estimated anoms, indicated by circles, should also be
returned.
y_log: Apply log scaling to the y-axis. This helps with viewing
plots that have extremely large positive anomalies relative
to the rest of the data.
xlabel: X-axis label to be added to the output plot.
ylabel: Y-axis label to be added to the output plot.
title: Title for the output plot.
verbose: Enable debug messages
resampling: whether ms or sec granularity should be resampled to min granularity.
Defaults to False.
period_override: Override the auto-generated period
Defaults to None
Details:
"longterm" This option should be set when the input time series
is longer than a month. The option enables the approach described
in Vallis, Hochenbaum, and Kejariwal (2014).
"threshold" Filter all negative anomalies and those anomalies
whose magnitude is smaller than one of the specified thresholds
which include: the median of the daily max values (med_max), the
95th percentile of the daily max values (p95), and the 99th
percentile of the daily max values (p99).
Value:
The returned value is a list with the following components.
anoms: Data frame containing timestamps, values, and optionally
expected values.
plot: A graphical object if plotting was requested by the user. The
plot contains the estimated anomalies annotated on the input
time series.
"threshold" Filter all negative anomalies and those anomalies
whose magnitude is smaller than one of the specified thresholds
which include: the median of the daily max values (med_max), the
95th percentile of the daily max values (p95), and the 99th
percentile of the daily max values (p99).
Value:
The returned value is a list with the following components.
anoms: Data frame containing timestamps, values, and optionally
expected values.
plot: A graphical object if plotting was requested by the user. The
plot contains the estimated anomalies annotated on the input
time series.
One can save "anoms" to a file in the following fashion:
write.csv(<return list name>[["anoms"]], file=<filename>)
One can save "plot" to a file in the following fashion:
ggsave(<filename>, plot=<return list name>[["plot"]])
References:
Vallis, O., Hochenbaum, J. and Kejariwal, A., (2014) "A Novel
Technique for Long-Term Anomaly Detection in the Cloud", 6th
USENIX, Philadelphia, PA.
Rosner, B., (May 1983), "Percentage Points for a Generalized ESD
Many-Outlier Procedure" , Technometrics, 25(2), pp. 165-172.
See Also:
anomaly_detect_vec
Examples:
# To detect all anomalies
anomaly_detect_ts(raw_data, max_anoms=0.02, direction="both", plot=True)
# To detect only the anomalies in the last day, run the following:
anomaly_detect_ts(raw_data, max_anoms=0.02, direction="both", only_last="day", plot=True)
# To detect only the anomalies in the last hr, run the following:
anomaly_detect_ts(raw_data, max_anoms=0.02, direction="both", only_last="hr", plot=True)
# To detect only the anomalies in the last hr and resample data of ms or sec granularity:
anomaly_detect_ts(raw_data, max_anoms=0.02, direction="both", only_last="hr", plot=True, resampling=True)
# To detect anomalies in the last day specifying a period of 1440
anomaly_detect_ts(raw_data, max_anoms=0.02, direction="both", only_last="hr", period_override=1440)
"""
import numpy as np
import scipy as sp
import pandas as pd
import datetime
import statsmodels.api as sm
import logging
import matplotlib.pyplot as plt #this will be used for plotting.
logger = logging.getLogger(__name__)
def _handle_granularity_error(level):
"""
Raises ValueError with detailed error message if one of the two situations is true:
1. calculated granularity is less than minute (sec or ms)
2. resampling is not enabled for situations where calculated granularity < min
level : String
the granularity that is below the min threshold
"""
#improving the message as if user selects Timestamp, Dimension, Value sort of data then repeated timelines
#will cause issues with the module. Ideally, user should only supply single KPI for a single dimension with timestamp.
e_message = '%s granularity is not supported. Ensure granularity => minute or enable resampling. Please check if you are using multiple dimensions with same timestamps in the data which cause repetition of same timestamps.' % level
raise ValueError(e_message)
def _resample_to_min(data, period_override=None):
"""
Resamples a data set to the min level of granularity
data : pandas DataFrame
input Pandas DataFrame
period_override : int
indicates whether resampling should be done with overridden value instead of min (1440)
"""
data = data.resample('60s', label='right').sum()
if _override_period(period_override):
period = period_override
else:
period = 1440
return (data, period)
def _override_period(period_override):
"""
Indicates whether period can be overridden if the period derived from granularity does
not match the generated period.
period_override : int
the user-specified period that overrides the value calculated from granularity
"""
return period_override is not None
def _get_period(gran_period, period_arg=None):
"""
Returns the generated period or overridden period depending upon the period_arg
gran_period : int
the period generated from the granularity
period_arg : the period override value that is either None or an int
the period to override the period generated from granularity
"""
if _override_period(period_arg):
return period_arg
else:
return gran_period
def _get_data_tuple(raw_data, period_override, resampling=False):
    """Generate a (data, period, granularity) tuple from the raw input.

    raw_data : pandas DataFrame
        input data
    period_override : int
        period specified in the anomaly_detect_ts parameter list, None if
        it is not provided
    resampling : True | False
        indicates whether the raw_data should be resampled to a supporting
        granularity, if applicable

    Raises ValueError (via _handle_granularity_error) for sec/ms
    granularity when resampling is disabled.
    """
    data = raw_data.sort_index()
    timediff = _get_time_diff(data)
    if timediff.days > 0:
        period = _get_period(7, period_override)
        granularity = 'day'
    elif timediff.seconds / 60 / 60 >= 1:
        granularity = 'hr'
        period = _get_period(24, period_override)
    elif timediff.seconds / 60 >= 1:
        granularity = 'min'
        period = _get_period(1440, period_override)
    elif timediff.seconds > 0:
        granularity = 'sec'
        # Aggregate to minute granularity when resampling is enabled,
        # otherwise fail fast. The original code had a duplicated
        # `elif timediff.seconds > 0` branch that never set `period`
        # (NameError on return) and assigned _resample_to_min()'s
        # (data, period) tuple to `period` alone; both are fixed here by
        # merging the branches and unpacking the tuple.
        if resampling is True:
            data, period = _resample_to_min(data, period_override)
        else:
            _handle_granularity_error('sec')
    else:
        # Sub-second (ms) granularity: aggregate to minute level when
        # resampling is enabled, otherwise fail fast.
        if resampling is True:
            data, period = _resample_to_min(data, period_override)
            granularity = None
        else:
            _handle_granularity_error('ms')
    return (data, period, granularity)
def _get_time_diff(data):
"""
Generates the time difference used to determine granularity and
to generate the period
data : pandas DataFrame
composed of input data
"""
return data.index[1] - data.index[0]
def _get_max_anoms(data, max_anoms):
"""
Returns the max_anoms parameter used for S-H-ESD time series anomaly detection
data : pandas DataFrame
composed of input data
max_anoms : float
the input max_anoms
"""
if max_anoms == 0:
logger.warning('0 max_anoms results in max_outliers being 0.')
return 1 / data.size if max_anoms < 1 / data.size else max_anoms
def _process_long_term_data(data, period, granularity, piecewise_median_period_weeks):
    """
    Processes result set when longterm is set to true

    data : list of floats
        the result set of anoms
    period : int
        the calculated or overridden period value
    granularity : string
        the calculated or overridden granularity
    piecewise_median_period_weeks : int
        used to determine days and observations per period
    """
    # Pre-allocate list with size equal to the number of piecewise_median_period_weeks chunks in x + any left over chunk
    # handle edge cases for daily and single column data period lengths
    # Note the precedence: these parse as `(A + 1) if cond else B`, so the
    # "+ 1" applies only to the daily-granularity case.
    num_obs_in_period = period * piecewise_median_period_weeks + \
        1 if granularity == 'day' else period * 7 * piecewise_median_period_weeks
    num_days_in_period = (7 * piecewise_median_period_weeks) + \
        1 if granularity == 'day' else (7 * piecewise_median_period_weeks)
    all_data = []
    # Subset x into piecewise_median_period_weeks chunks
    # NOTE(review): the range starts at 1 (looks like a leftover from the
    # 1-based R original), so the very first observation is never a chunk
    # start, and when a step lands exactly on data.size the
    # data.index[i] lookup would raise IndexError — confirm intended.
    for i in range(1, data.size + 1, num_obs_in_period):
        start_date = data.index[i]
        # if there is at least 14 days left, subset it, otherwise subset last_date - 14 days
        end_date = start_date + datetime.timedelta(days=num_days_in_period)
        if end_date < data.index[-1]:
            all_data.append(
                data.loc[lambda x: (x.index >= start_date) & (x.index <= end_date)])
        else:
            # Final (short) chunk: take the trailing num_days_in_period
            # window ending at the last observation instead.
            all_data.append(
                data.loc[lambda x: x.index >= data.index[-1] - datetime.timedelta(days=num_days_in_period)])
    return all_data
def _get_only_last_results(data, all_anoms, granularity, only_last):
"""
Returns the results from the last day or hour only
data : pandas DataFrame
input data set
all_anoms : list of floats
all of the anomalies returned by the algorithm
granularity : string day | hr | min
The supported granularity value
only_last : string day | hr
The subset of anomalies to be returned
"""
#Unused variables start_date and x_subset_week were commented by aliasgherman
# on 2020-06-13 as the plot logic does not utilize them for now.
#start_date = data.index[-1] - datetime.timedelta(days=7)
start_anoms = data.index[-1] - datetime.timedelta(days=1)
if only_last == 'hr':
# We need to change start_date and start_anoms for the hourly only_last option
#start_date = datetime.datetime.combine(
# (data.index[-1] - datetime.timedelta(days=2)).date(), datetime.time.min)
start_anoms = data.index[-1] - datetime.timedelta(hours=1)
# subset the last days worth of data
x_subset_single_day = data.loc[data.index > start_anoms]
# When plotting anoms for the last day only we only show the previous weeks data
## Below was commented out by aliasgherman as the plot logic (v001)
## does not use this variable and plots whole dataset.
##x_subset_week = data.loc[lambda df: (
## df.index <= start_anoms) & (df.index > start_date)]
#
return all_anoms.loc[all_anoms.index >= x_subset_single_day.index[0]]
def _get_plot_breaks(granularity, only_last):
"""
Generates the breaks used in plotting
granularity : string
the supported granularity value
only_last : True | False
indicates whether only the last day or hour is returned and to be plotted
"""
if granularity == 'day':
breaks = 3 * 12
elif only_last == 'day':
breaks = 12
else:
breaks = 3
return breaks
def _perform_threshold_filter(anoms, periodic_max, threshold):
"""
Filters the list of anomalies per the threshold filter
anoms : list of floats
the anoms returned by the algorithm
periodic_max : float
calculated daily max value
threshold : med_max" | "p95" | "p99"
user-specified threshold value used to filter anoms
"""
if threshold == 'med_max':
thresh = periodic_max.median()
elif threshold == 'p95':
thresh = periodic_max.quantile(0.95)
elif threshold == 'p99':
thresh = periodic_max.quantile(0.99)
else:
raise AttributeError(
'Invalid threshold, threshold options are None | med_max | p95 | p99')
return anoms.loc[anoms.values >= thresh]
def _get_max_outliers(data, max_percent_anomalies):
"""
Calculates the max_outliers for an input data set
data : pandas DataFrame
the input data set
max_percent_anomalies : float
the input maximum number of anomalies per percent of data set values
"""
max_outliers = int(np.trunc(data.size * max_percent_anomalies))
if not max_outliers:
raise ValueError('With longterm=True, AnomalyDetection splits the data into 2 week periods by default. You have {0} observations in a period, which is too few. Set a higher piecewise_median_period_weeks.'.format(
data.size))
return max_outliers
def _get_decomposed_data_tuple(data, num_obs_per_period):
    """
    Returns a tuple consisting of two versions of the input data set: seasonally-decomposed and smoothed

    data : pandas DataFrame
        the input data set
    num_obs_per_period : int
        the number of observations in each period
    """
    # One-sided STL-style decomposition into trend/seasonal/residual.
    # NOTE(review): statsmodels renamed the `freq` keyword to `period`
    # (deprecated around 0.11, later removed) — confirm against the
    # pinned statsmodels version.
    decomposed = sm.tsa.seasonal_decompose(
        data, freq=num_obs_per_period, two_sided=False)
    # Smoothed series: original minus residual (NaN residuals treated as 0).
    smoothed = data - decomposed.resid.fillna(0)
    # De-seasonalized, mean-centered series used for S-H-ESD.
    data = data - decomposed.seasonal - data.mean()
    return (data, smoothed)
def anomaly_detect_ts(x, max_anoms=0.1, direction="pos", alpha=0.05, only_last=None,
                      threshold=None, e_value=False, longterm=False, piecewise_median_period_weeks=2,
                      plot=False, y_log=False, xlabel="", ylabel="count", title='shesd output: ', verbose=False,
                      dropna=False, resampling=False, period_override=None):
    """Detect anomalies in a time series using Seasonal Hybrid ESD (S-H-ESD).

    Args:
        x: pandas Series with a datetime64[ns] index and numeric values.
        max_anoms: maximum fraction of the data that may be flagged (0..0.49).
        direction: 'pos' | 'neg' | 'both' -- which tail(s) to flag.
        alpha: statistical significance level for the ESD test.
        only_last: None | 'day' | 'hr' -- restrict results to the most recent window.
        threshold: None | 'med_max' | 'p95' | 'p99' -- optional filter against daily maxima.
        e_value: if True, also return the seasonal-plus-trend expected values.
        longterm: if True, split the series into blocks before detection.
        piecewise_median_period_weeks: block size in weeks used when longterm=True.
        plot: if True, attempt to build a plot of the detected anomalies.
        y_log, xlabel, ylabel, title: plotting cosmetics (largely unused; see TODOs).
        verbose: enable debug logging.
        dropna: accepted for API compatibility (not referenced in this body).
        resampling, period_override: forwarded to _get_data_tuple.

    Returns:
        dict with key 'anoms' (Series of anomalous points); when anomalies are
        found, also 'expected' (seasonal+trend Series or None) and 'plot'
        (Axes when plot=True, else None).
    """
    if verbose:
        logger.setLevel(logging.DEBUG)
        logger.debug("The debug logs will be logged because verbose=%s", verbose)
    # validation
    if isinstance(x, pd.Series) == False:
        raise AssertionError('Data must be a series(Pandas.Series)')
    #changing below as apparantly the large integer data like int64 was not captured by below
    if x.values.dtype not in [int, float, 'int64']:
        raise ValueError('Values of the series must be number')
    if x.index.dtype != np.dtype('datetime64[ns]'):
        raise ValueError('Index of the series must be datetime')
    if max_anoms > 0.49 or max_anoms < 0:
        raise AttributeError('max_anoms must be non-negative and less than 50% ')
    if direction not in ['pos', 'neg', 'both']:
        raise AttributeError('direction options: pos | neg | both')
    if only_last not in [None, 'day', 'hr']:
        raise AttributeError('only_last options: None | day | hr')
    if threshold not in [None, 'med_max', 'p95', 'p99']:
        raise AttributeError('threshold options: None | med_max | p95 | p99')
    # NOTE(review): the condition rejects values below 2 but the message says
    # "greater than 2 weeks" -- confirm whether 2 itself should be allowed.
    if piecewise_median_period_weeks < 2:
        raise AttributeError('piecewise_median_period_weeks must be greater than 2 weeks')
    logger.debug('Completed validation of input parameters')
    if alpha < 0.01 or alpha > 0.1:
        logger.warning('alpha is the statistical significance and is usually between 0.01 and 0.1')
    data, period, granularity = _get_data_tuple(x, period_override, resampling)
    if granularity == 'day':
        num_days_per_line = 7
        logger.info("Recording the variable in case plot function needs it. gran = day. {}".format(num_days_per_line))
        # Hourly restriction makes no sense on daily data; coerce to 'day'.
        only_last = 'day' if only_last == 'hr' else only_last
    max_anoms = _get_max_anoms(data, max_anoms)
    # If longterm is enabled, break the data into subset data frames and store in all_data
    all_data = _process_long_term_data(data, period, granularity, piecewise_median_period_weeks) if longterm else [data]
    all_anoms = pd.Series()
    seasonal_plus_trend = pd.Series()
    # Detect anomalies on all data (either entire data in one-pass, or in 2 week blocks if longterm=True)
    for series in all_data:
        shesd = _detect_anoms(series, k=max_anoms, alpha=alpha, num_obs_per_period=period, use_decomp=True,
                              use_esd=False, direction=direction, verbose=verbose)
        shesd_anoms = shesd['anoms']
        shesd_stl = shesd['stl']
        # -- Step 3: Use detected anomaly timestamps to extract the actual anomalies (timestamp and value) from the data
        anoms = pd.Series() if shesd_anoms.empty else series.loc[shesd_anoms.index]
        # Filter the anomalies using one of the thresholding functions if applicable
        if threshold:
            # Calculate daily max values
            periodic_max = data.resample('1D').max()
            anoms = _perform_threshold_filter(anoms, periodic_max, threshold)
        all_anoms = all_anoms.append(anoms)
        seasonal_plus_trend = seasonal_plus_trend.append(shesd_stl)
    # De-dupe
    all_anoms.drop_duplicates(inplace=True)
    seasonal_plus_trend.drop_duplicates(inplace=True)
    # If only_last is specified, create a subset of the data corresponding to the most recent day or hour
    if only_last:
        all_anoms = _get_only_last_results(
            data, all_anoms, granularity, only_last)
    # If there are no anoms, log it and return an empty anoms result
    if all_anoms.empty:
        if verbose:
            logger.info('No anomalies detected.')
        return {
            'anoms': pd.Series(),
            'plot': None
        }
    ret_val = {
        'anoms': all_anoms,
        'expected': seasonal_plus_trend if e_value else None,
        'plot': 'TODO' if plot else None
    }
    if plot:
        # TODO additional refactoring and logic needed to support plotting
        #num_days_per_line
        #breaks = _get_plot_breaks(granularity, only_last)
        # x_subset_week
        ret_plot = _plot_anomalies(data, ret_val)
        ret_val['plot'] = ret_plot
        #raise Exception('TODO: Unsupported now')
    return ret_val
def _plot_anomalies(data, results):
    """
    Tries to plot the data and the anomalies detected in this data.
    Args:
        data: Time series on which we are performing the anomaly detection. (full data)
        results: the results dictionary which contains anomalies grouped in the key called 'anoms'
    Returns:
        The matplotlib Axes the series and anomaly markers were drawn on.
    """
    anoms = pd.DataFrame(results)
    # Left-join keeps every original timestamp; 'anoms' is NaN wherever no
    # anomaly was detected, and matplotlib simply skips NaN points.
    df_plot = pd.DataFrame(data).join(anoms, how='left')
    #df_plot = df_plot.fillna(0) #if no anomaly, then we will plot a zero. can be improved.
    _, ax = plt.subplots(figsize=(14, 6))
    ax.plot(df_plot['anoms'], color='r', marker='o', label='Anomaly', linestyle="None")
    ax.plot(data, label=data.name)
    ax.set_title(data.name)
    ax.legend(loc='best')
    # Positional form works across matplotlib versions (the 'b=' keyword was
    # deprecated in 3.5 and later removed).
    ax.grid(True)
    #plt.show()
    return ax
def _detect_anoms(data, k=0.49, alpha=0.05, num_obs_per_period=None,
                  use_decomp=True, use_esd=False, direction="pos", verbose=False):
    """
    Detects anomalies in a time series using S-H-ESD.
    Args:
        data: Time series to perform anomaly detection on.
        k: Maximum number of anomalies that S-H-ESD will detect as a percentage of the data.
        alpha: The level of statistical significance with which to accept or reject anomalies.
        num_obs_per_period: Defines the number of observations in a single period, and used during seasonal decomposition.
        use_decomp: Use seasonal decomposition during anomaly detection.
        use_esd: Uses regular ESD instead of hybrid-ESD. Note hybrid-ESD is more statistically robust.
        direction: 'pos' | 'neg' | 'both'. 'pos'/'neg' run a one-tailed test for
            positive (right-tailed) or negative (left-tailed) anomalies only;
            'both' runs a two-tailed test.
        verbose: Additionally printing for debugging.
    Returns:
        A dict containing the anomalies ('anoms') and decomposition components ('stl').
    """
    # validation
    assert num_obs_per_period, "must supply period length for time series decomposition"
    assert direction in ['pos', 'neg',
                         'both'], 'direction options: pos | neg | both'
    ###########################################################################
    # Changing below code. If the data contains broken dates then the data.size may be less than observation periods
    # so for such cases, we should return empty obsevations
    ###########################################################################
    #assert data.size >= num_obs_per_period * \
    #    2, 'Anomaly detection needs at least 2 periods worth of data'
    if data.size < num_obs_per_period * 2:
        return {
            'anoms': pd.Series(),  #return empty series
            'stl': data  #return untouched data...
        }
    # test case can be any data set which has large gapes in the dates.
    # like data contains dates from year 2000 till 2020 but for 2001, 2001-01-01 till 2001-01-04 and then from 2001-06-01.
    # this will break the obs_period and data.size check. So I have just removed anomaly detection for these small patches.
    ###########################################################################
    assert data[data.isnull(
    )].empty, 'Data contains NA. We suggest replacing NA with interpolated values before detecting anomaly'
    # conversion
    one_tail = True if direction in ['pos', 'neg'] else False
    upper_tail = True if direction in ['pos', 'both'] else False
    # -- Step 1: Decompose data. This returns a univariate remainder which will be used for anomaly detection. Optionally, we might NOT decompose.
    # Note: R use stl, but here we will use MA, the result may be different TODO.. Here need improvement
    #decomposed = sm.tsa.seasonal_decompose(data, freq=num_obs_per_period, two_sided=False)
    #smoothed = data - decomposed.resid.fillna(0)
    #data = data - decomposed.seasonal - data.mean()
    data, smoothed = _get_decomposed_data_tuple(data, num_obs_per_period)
    max_outliers = _get_max_outliers(data, k)
    R_idx = pd.Series()
    n = data.size
    # Compute test statistic until r=max_outliers values have been
    # removed from the sample.
    for i in range(1, max_outliers + 1):
        if verbose:
            # Fixed: logging takes a %-style format string plus args; passing
            # multiple bare positionals raised a formatting error at runtime.
            logger.info('%s / %s completed', i, max_outliers)
        # MAD of zero would make the test statistic undefined; stop early.
        if not data.mad():
            break
        if not one_tail:
            ares = abs(data - data.median())
        elif upper_tail:
            ares = data - data.median()
        else:
            ares = data.median() - data
        # Hybrid ESD: deviations scaled by MAD instead of the std deviation.
        ares = ares / data.mad()
        tmp_anom_index = ares[ares.values == ares.max()].index
        cand = pd.Series(data.loc[tmp_anom_index], index=tmp_anom_index)
        data.drop(tmp_anom_index, inplace=True)
        # Compute critical value.
        p = 1 - alpha / (n - i + 1) if one_tail else (1 -
                                                      alpha / (2 * (n - i + 1)))
        t = sp.stats.t.ppf(p, n - i - 1)
        # ESD critical value lambda_i for the i-th removal.
        lam = t * (n - i) / np.sqrt((n - i - 1 + t ** 2) * (n - i + 1))
        if ares.max() > lam:
            R_idx = R_idx.append(cand)
    return {
        'anoms': R_idx,
        'stl': smoothed
    }
|
from django.contrib import admin
from .models import Comments
# Expose the Comments model in the Django admin site with default options.
admin.site.register(Comments)
|
import asyncio
from ph4_walkingpad import pad
from ph4_walkingpad.pad import WalkingPad, Controller
from ph4_walkingpad.utils import setup_logging
import yaml
import psycopg2
from datetime import date
def on_new_status(sender, record):
    """Status callback: print the received record and persist it.

    The controller reports distance in units of 10 m, so divide by 100 for km.
    Resets the controller's last_status so the same record is not re-handled.
    """
    km = record.dist / 100
    print("Received Record:")
    print(f'Distance: {km}km')
    print(f'Time: {record.time} seconds')
    print(f'Steps: {record.steps}')
    print("Storing in DB...")
    store_in_db(record.steps, km, record.time)
    ctler.last_status = None
def store_in_db(steps, distance_in_km, duration_in_seconds):
    """Insert one exercise row (date, steps, minutes, km) into Postgres.

    Connection parameters come from the module-level `config['database']`
    mapping. The cursor and connection are always closed, even on failure.
    """
    conn = None
    cur = None
    try:
        db_config = config['database']
        conn = psycopg2.connect(host=db_config['host'], port=db_config['port'], dbname=db_config['dbname'], user=db_config['user'], password=db_config['password'])
        cur = conn.cursor()
        date_today = date.today().strftime("%Y-%m-%d")
        duration = int(duration_in_seconds / 60)
        # Parameterized query: lets the driver handle quoting and avoids SQL
        # injection (the previous version interpolated values with str.format).
        cur.execute(
            "INSERT INTO exercise VALUES (%s, %s, %s, %s)",
            (date_today, steps, duration, distance_in_km))
        conn.commit()
    finally:
        # Guard each close: if connect() itself failed, cur/conn were never
        # bound and the old code raised NameError, masking the real error.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
def load_config():
    """Parse config.yaml from the working directory.

    Returns the parsed mapping, or None when parsing fails (the YAML error
    is printed rather than raised, preserving the original best-effort style).
    """
    with open("config.yaml", 'r') as config_file:
        try:
            return yaml.safe_load(config_file)
        except yaml.YAMLError as exc:
            print(exc)
# Module-level wiring: load config.yaml, route the pad library's logging,
# and create the controller whose status callback persists each record.
config = load_config()
log = setup_logging()
pad.logger = log
ctler = Controller()
ctler.handler_last_status = on_new_status
async def connect():
    """Open the connection to the WalkingPad address from the config."""
    device_address = config['address']
    print(f"Connecting to {device_address}")
    await ctler.run(device_address)
async def disconnect():
    """Close the controller's connection to the pad."""
    await ctler.disconnect()
async def set_to_standby():
    """Switch the pad into standby mode."""
    await ctler.switch_mode(WalkingPad.MODE_STANDBY)
async def get_stats():
    """Request the stored record at index 0 from the pad.

    The reply presumably arrives asynchronously via the controller's status
    callback (on_new_status), hence the short wait before returning.
    """
    await ctler.ask_hist(0)
    await asyncio.sleep(1.0)
async def main():
    """Connect to the pad, set standby, pull stats, then always disconnect."""
    try:
        print("Connecting...")
        await connect()
        # NOTE(review): the 1s sleeps appear to give the device time to
        # process each command before the next one -- confirm against the pad.
        await asyncio.sleep(1.0)
        print("Getting Setting to standby...")
        await set_to_standby()
        await asyncio.sleep(1.0)
        print("Getting Stats...")
        await get_stats()
        await asyncio.sleep(1.0)
    finally:
        print("Disconnecting from device")
        await disconnect()
        await asyncio.sleep(1.0)
# asyncio.run() creates, runs and closes the event loop in one call; it
# replaces the deprecated get_event_loop()/run_until_complete()/close() trio.
asyncio.run(main())
|
"""
Created: March 1, 2020
Updated: September 14, 2020
Author: Suleyman Barthe-Sukhera
Description: RSA private and public key classes
"""
from binascii import hexlify, unhexlify
from os import getcwd
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Hash.SHA3_256 import SHA3_256_Hash
from Crypto.PublicKey.RSA import \
generate as generate_rsa_key, \
import_key as import_rsa_key, \
construct as construct_rsa_key, \
RsaKey as CryptoRSAKey
from CommonLib.proto.AIOMessage_pb2 import AIOMessage
from CommonLib.proto.Base_pb2 import EncryptionType, Error, ErrorCode
class BaseRSAKey:
    """Shared RSA behaviour for private and public keys.

    Wraps a pycryptodome RSA key in a PKCS#1 OAEP cipher (SHA3-256 hash) and
    provides AIOMessage-level encrypt/decrypt helpers. Failures are reported
    by attaching an Error payload to the message instead of raising.
    """
    def __init__(self, name: str, rsa_key: CryptoRSAKey):
        self.name = name
        self.__rsa_key = rsa_key
        # Fixed: was a redundant double assignment
        # (self.__cipher = self.__cipher = PKCS1_OAEP.new(...)).
        self.__cipher = PKCS1_OAEP.new(rsa_key, hashAlgo=SHA3_256_Hash)
    """ === ENCRYPTION === """
    def encrypt(self, message: AIOMessage) -> AIOMessage:
        """ Encrypts AIOMessage.message contents """
        # Set encryption type to RSA
        message.encryption_type = EncryptionType.RSA
        # Grab plain_text_message, and remove plain_text_message from message
        plain_text_message = message.message
        message.ClearField('message')
        # Encrypt plain_text_message and insert as encrypted message
        try:
            message.encrypted_messages.Extend([self.__cipher.encrypt(plain_text_message)])
        except Exception as e:
            # On failure, return the message carrying an Error payload so
            # callers always receive an AIOMessage.
            error = Error()
            # NOTE(review): 'ERR0R' (with a zero) mirrors the enum member name
            # in Base_pb2 -- confirm the proto really spells it this way.
            error.error_code = ErrorCode.ENCRYPTION_ERR0R
            error.error_details = str(e)
            print(f'WARNING - ENCRYPTION FAILURE: {e}')
            message.error.ParseFromString(error.SerializeToString())
        # TODO: Add encryption timestamp
        return message
    """ === DECRYPTION === """
    def decrypt(self, message: AIOMessage) -> AIOMessage:
        """ Decrypts AIOMessage.encrypted_message contents """
        # Make sure encryption type is correct
        assert message.encryption_type == EncryptionType.RSA, \
            f'Expected RSA encryption, got: {message.encryption_type}!'
        # Make sure there is only one message for RSA encrypted messages
        encrypted_messages = [encrypted_message for encrypted_message in message.encrypted_messages]
        assert len(encrypted_messages) == 1, \
            f'RSA encryption only expects one encrypted message, ' \
            f'received {len(encrypted_messages)} encrypted messages!'
        # Populate plain text message field and remove encrypted message and encrypted message type
        try:
            message.message = self.__cipher.decrypt(encrypted_messages[0])
            message.ClearField('encrypted_messages')
            message.ClearField('encryption_type')
        except Exception as e:
            error = Error()
            error.error_code = ErrorCode.DECRYPTION_ERROR
            error.error_details = str(e)
            print(f'WARNING - DECRYPTION FAILURE: {e}')
            message.error.ParseFromString(error.SerializeToString())
        # Return decrypted message
        return message
    """ === GETTERS === """
    def get_pub_key_details(self):
        """Return the public key components: (modulus n, public exponent e)."""
        return self.__rsa_key.n, self.__rsa_key.e
class PrivateRSAKey(BaseRSAKey):
    """ Private RSA asymmetric key that can en/de/crypt messages """
    def __init__(
            self, name: str, rsa_pwd: str, mod_len: int = 2048,
            rsa_key_path: str = f'{getcwd()}\\User\\Keys\\rsa.pem') -> None:
        """
        Base initializer for RSAKey, will try and load an existing RSAKey before creating a new one
        :param name: String name that this object will log itself with
        :param rsa_pwd: Bytes used to decode rsa.pem
        :param mod_len: Modulus length for creating RSA key, default to 2048
        :param rsa_key_path: Optional modifier of default path to RSA key
        """
        # NOTE(review): the default key path uses Windows separators and will
        # not resolve on POSIX systems -- confirm deployment target.
        self.name = name
        self.__modulus_len = mod_len
        self.rsa_key_path = rsa_key_path
        try:
            print('Loading existing RSA keys...')
            # The key file stores a hex-encoded, password-protected DER key;
            # decode hex back to bytes before importing.
            with open(self.rsa_key_path, 'r') as rsa_file:
                imported_key = rsa_file.read().encode('utf-8')
                imported_key = unhexlify(imported_key)
                __private_rsa_key = import_rsa_key(imported_key, passphrase=rsa_pwd)
        except (FileNotFoundError, ValueError) as e:
            if type(e) is ValueError:
                print('Failed to parse existing RSA keys, creating new ones...')
            elif type(e) is FileNotFoundError:
                print('RSA keys do not exist, creating new ones...')
            # NOTE(review): e=23981519 is a nonstandard RSA public exponent
            # (65537 is conventional) -- confirm this choice is intentional.
            __private_rsa_key = generate_rsa_key(mod_len, e=23981519)
            with open(self.rsa_key_path, 'w+') as rsa_file:
                # Persist as hex-encoded, passphrase-protected PKCS#8 DER.
                exportable_key = hexlify(__private_rsa_key.export_key(
                    format='DER', passphrase=rsa_pwd, pkcs=8))
                rsa_file.write(exportable_key.decode('utf-8'))
            print('RSA keys created and saved!')
        except Exception as e:
            print(f'CRITICAL ERROR 0: Failed to initialize RSA keys with error:\n{e}')
            raise e
        # Build rest of RSA key functionality
        BaseRSAKey.__init__(self, name, __private_rsa_key)
        print('Successfully initialized PrivateRSAKey')
    """ === GETTERS === """
    def get_public_key(self) -> dict:
        """ returns 'n' and 'e' """
        # Serialize the public components as fixed-width big-endian bytes:
        # n padded to the modulus length, e to 4 bytes.
        pub_key_details = self.get_pub_key_details()
        return dict(
            n=pub_key_details[0].to_bytes(length=int(self.__modulus_len / 8), byteorder='big'),
            e=pub_key_details[1].to_bytes(length=4, byteorder='big'))
class PublicRSAKey(BaseRSAKey):
    """Public RSA key reconstructed from raw big-endian (n, e) byte details."""
    def __init__(self, name: str, pub_key_details: dict) -> None:
        self.name = name
        # Rebuild the pycryptodome key object from the transported integers.
        modulus = int.from_bytes(pub_key_details['n'], byteorder='big')
        exponent = int.from_bytes(pub_key_details['e'], byteorder='big')
        # Build rest of RSA key functionality
        BaseRSAKey.__init__(self, name, construct_rsa_key((modulus, exponent)))
        print('Successfully initialized PublicRSAKey')
|
from pylab import figure, show, setp
#from matplotlib.numerix import sin, cos, exp, pi, arange
import numpy as np

# Four sample signals sharing one time base.
t = np.arange(0.0, 2.0, 0.01)
s1 = np.sin(2*np.pi*t)
s2 = np.exp(-t)
s3 = np.sin(2*np.pi*t)*np.exp(-t)
s4 = np.sin(2*np.pi*t)*np.cos(4*np.pi*t)

fig = figure()

yprops = dict(rotation=0,
              horizontalalignment='right',
              verticalalignment='center',
              x=-0.01)
axprops = dict(yticks=[-1, 0, 1])

ax1 = fig.add_axes([0.1, 0.7, 0.8, 0.2], **axprops)
ax1.plot(t, s1)
ax1.set_ylabel('S1', **yprops)

axprops['sharex'] = ax1
axprops['sharey'] = ax1
# force x axes to remain in register, even with toolbar navigation
ax2 = fig.add_axes([0.1, 0.5, 0.8, 0.2], **axprops)
ax2.plot(t, s2)
ax2.set_ylabel('S2', **yprops)

ax3 = fig.add_axes([0.1, 0.3, 0.8, 0.2], **axprops)
# Fixed: this panel is labelled 'S3' but previously plotted s4, leaving s3 unused.
ax3.plot(t, s3)
ax3.set_ylabel('S3', **yprops)

ax4 = fig.add_axes([0.1, 0.1, 0.8, 0.2], **axprops)
ax4.plot(t, s4)
ax4.set_ylabel('S4', **yprops)

# turn off x ticklabels for all but the lower axes
for ax in ax1, ax2, ax3:
    setp(ax.get_xticklabels(), visible=False)
show()
|
# Generated by Django 3.2.5 on 2021-08-01 18:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the custom User model, keyed by a unique
    # email field (no username field is defined).
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=100, unique=True)),
                ('name', models.CharField(max_length=150)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
from __future__ import absolute_import, division, print_function
import logging
import sys
from mesos.interface import Executor
from .messages import decode, encode
class ExecutorProxy(Executor):
    """Base class for Mesos executors.
    Users' executors should extend this class to get default implementations of
    methods they don't override.
    """
    def __init__(self, executor):
        # The wrapped user executor; every Mesos callback below is forwarded
        # to it with the driver wrapped and protobuf arguments decoded.
        self.executor = executor
    def registered(self, driver, executorInfo, frameworkInfo, slaveInfo):
        """Forward the 'registered' callback with decoded protobufs."""
        logging.info('Registered with slave', extra=dict())
        return self.executor.on_registered(ExecutorDriverProxy(driver),
                                           decode(executorInfo),
                                           decode(frameworkInfo),
                                           decode(slaveInfo))
    def reregistered(self, driver, slaveInfo):
        """Forward the 're-registered' callback with a decoded slaveInfo."""
        logging.info('Re-registered with slave', extra=dict())
        return self.executor.on_reregistered(ExecutorDriverProxy(driver),
                                             decode(slaveInfo))
    def disconnected(self, driver):
        """Forward the slave-disconnect callback."""
        logging.info('Disconnected from slave')
        return self.executor.on_disconnected(ExecutorDriverProxy(driver))
    def launchTask(self, driver, taskInfo):
        """Forward a task launch request with a decoded taskInfo."""
        logging.info('Launches task', extra=dict())
        return self.executor.on_launch(ExecutorDriverProxy(driver),
                                       decode(taskInfo))
    def killTask(self, driver, taskId):
        """Forward a task kill request with a decoded taskId."""
        logging.info('Kills task', extra=dict())
        return self.executor.on_kill(ExecutorDriverProxy(driver),
                                     decode(taskId))
    def frameworkMessage(self, driver, message):
        """Forward a raw framework message (passed through undecoded)."""
        logging.info('Recived framework message', extra=dict())
        return self.executor.on_message(ExecutorDriverProxy(driver),
                                        message)
    def shutdown(self, driver):
        """Forward the executor shutdown callback."""
        logging.info('Executor shutdown')
        return self.executor.on_shutdown(ExecutorDriverProxy(driver))
    def error(self, driver, message):
        """Report a Mesos error on stderr, then forward it."""
        print("Error from Mesos: %s" % message, file=sys.stderr)
        return self.executor.on_error(ExecutorDriverProxy(driver),
                                      message)
class ExecutorDriverProxy(object):
    """Thin pythonic wrapper around a Mesos ExecutorDriver.

    Logs each call and delegates to the underlying driver, encoding outgoing
    protobuf messages where needed.
    """
    def __init__(self, driver):
        # The raw Mesos ExecutorDriver being wrapped.
        self.driver = driver
    def start(self):
        """Starts the executor driver.
        This needs to be called before any other driver calls are made.
        """
        logging.info('Driver started')
        return self.driver.start()
    def stop(self):
        """Stops the executor driver."""
        logging.info('Driver stopped')
        return self.driver.stop()
    def abort(self):
        """Aborts the driver so that no more callbacks can be made to the
        executor.
        The semantics of abort and stop have deliberately been separated so that
        code can detect an aborted driver (i.e., via the return status of
        ExecutorDriver.join), and instantiate and start another driver if
        desired (from within the same process, although this functionality is
        currently not supported for executors).
        """
        logging.info('Driver aborted')
        return self.driver.abort()
    def join(self):
        """Waits for the driver to be stopped or aborted, possibly blocking the
        current thread indefinitely.
        The return status of this function can be used to determine if the
        driver was aborted (see mesos.proto for a description of Status).
        """
        logging.info('Joined to driver')
        return self.driver.join()
    def run(self):
        """Starts and immediately joins (i.e., blocks on) the driver."""
        logging.info('Driver run')
        return self.driver.run()
    def update(self, status):
        """Sends a status update to the framework scheduler.
        Retrying as necessary until an acknowledgement has been received or the
        executor is terminated (in which case, a TASK_LOST status update will be
        sent).
        See Scheduler.statusUpdate for more information about status update
        acknowledgements.
        """
        logging.info('Executor sends status update')
        return self.driver.sendStatusUpdate(encode(status))
    def message(self, data):
        """Sends a message to the framework scheduler.
        These messages are best effort; do not expect a framework message to be
        retransmitted in any reliable fashion.
        """
        logging.info('Driver sends framework message')
        return self.driver.sendFrameworkMessage(data)
|
from Jumpscale import j
from bottle import Bottle, request, response
app = Bottle()
client = j.clients.oauth_proxy.get("main")
oauth_app = j.tools.oauth_proxy.get(app, client)
@app.route("/oauth/authorize")
def authorize():
    """Delegate to the oauth proxy's authorize() handler."""
    return oauth_app.authorize()
@app.route("/oauth/callback")
def callback():
    """Delegate to the oauth proxy's oauth_callback() handler."""
    return oauth_app.oauth_callback()
@app.route("/oauth/providers")
def providers():
    """Return the configured OAuth providers as a JSON list."""
    response.content_type = "application/json"
    return j.data.serializers.json.dumps(client.providers_list())
@app.route("/oauth/key")
def key():
    """Return the default nacl verification key as hex."""
    return j.data.nacl.default.verify_key_hex
# NOTE(review): oauth_app wraps the original Bottle app; rebinding `app`
# here exposes the wrapped application from this point on -- confirm intended.
app = oauth_app.app
class Oauth2Factory(j.baseclasses.threebot_factory):
    # Threebot factory exposing the module-level Bottle app at this jslocation.
    __jslocation__ = "j.threebot_factories.package.oauth2"
    def get_app(self):
        """Return the module-level (oauth-wrapped) Bottle application."""
        return app
|
from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_uploads import UploadSet,configure_uploads,IMAGES
from flask_mail import Mail
from flask_simplemde import SimpleMDE
# Flask extension singletons, created unbound and initialized against the
# application inside create_app() (application-factory pattern).
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
bootstrap = Bootstrap()
db = SQLAlchemy()
photos = UploadSet('photos',IMAGES)
mail = Mail()
simple = SimpleMDE()
def create_app(config_name):
    """Application factory: build and return a Flask app.

    config_name selects the configuration object from config_options to load.
    """
    app = Flask(__name__)
    # Creating the app configurations
    app.config.from_object(config_options[config_name])
    # Initializing flask extensions
    bootstrap.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    mail.init_app(app)
    simple.init_app(app)
    # Registering the blueprint
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint,url_prefix = '/auth')
    # setting config
    # from .requests import configure_request
    # configure_request(app)
    # configure UploadSet
    configure_uploads(app,photos)
    return app
|
from __future__ import absolute_import
import falcon
from tracker.utils import create_cart_id, json_dumps
from tracker.hooks import validate_request
from tracker.tasks import db_save
class CartItem(object):
    """Falcon resource that records cart items posted by the tracker."""
    @falcon.before(validate_request)
    def on_post(self, req, resp, **params):
        """Queue the posted item for persistence and echo the cart id back."""
        # Reuse the caller's cart_id when present; otherwise mint a new one.
        params['cart_id'] = params.get('cart_id', create_cart_id())
        # Persisted asynchronously via the db_save task's .delay().
        db_save.delay(params)
        resp.set_cookie('cart_id', params.get('cart_id'))
        resp.status = falcon.HTTP_201
        resp.body = json_dumps({'cart_id': params.get('cart_id')})
app = falcon.API()
cart_item = CartItem()
# Both routes share one resource; the optional {id} URI field reaches
# on_post through **params.
app.add_route('/item', cart_item)
app.add_route('/item/{id}', cart_item)
|
# Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
# Maps task name -> conf file stem for tasks whose conf file is named
# differently from the task itself (empty means conf stem == task name).
task_conf_mapping = dict()
exp_dir = "lite_sac"

if __name__ == "__main__":
    # Usage: launcher.py METHOD TASK CLUSTER [REPEATS]
    method, task, cluster = sys.argv[1:4]
    repeats = 3
    if len(sys.argv) > 4:
        # Convert once: argv values are strings, and repeats is used as an int.
        repeats = int(sys.argv[4])
    print("Method: %s, task: %s, cluster: %s, repeats: %s" %
          (method, task, cluster, repeats))
    for i in range(repeats):
        print("Launching repeat %d" % i)
        task_conf = task_conf_mapping.get(task, task)
        os.system(
            f"python -m cluster_train --job_name {method}_{task}"
            f" --conf {exp_dir}/{method}/{method}_{task_conf}_conf.py"
            f" --search_config {exp_dir}/{task}.json --cluster {cluster}")
|
"""The application/controller class for ABQ Data Entry"""
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog
from . import views as v
from . import models as m
from .mainmenu import MainMenu
class Application(tk.Tk):
    """Application root window"""
    def __init__(self, *args, **kwargs):
        """Authenticate, load settings, then build the main window widgets."""
        super().__init__(*args, **kwargs)
        # Hide window while GUI is built
        self.withdraw()
        # Authenticate
        if not self._show_login():
            self.destroy()
            return
        # show the window
        self.deiconify()
        # Create model
        self.model = m.CSVModel()
        # Load settings
        # self.settings = {
        #     'autofill date': tk.BooleanVar(),
        #     'autofill sheet data': tk.BoleanVar()
        # }
        self.settings_model = m.SettingsModel()
        self._load_settings()
        # Begin building GUI
        self.title("ABQ Data Entry Application")
        self.columnconfigure(0, weight=1)
        # Create the menu
        menu = MainMenu(self, self.settings)
        self.config(menu=menu)
        # Menu communicates back via virtual events bound here.
        event_callbacks = {
            '<<FileSelect>>': self._on_file_select,
            '<<FileQuit>>': lambda _: self.quit(),
        }
        for sequence, callback in event_callbacks.items():
            self.bind(sequence, callback)
        ttk.Label(
            self,
            text="ABQ Data Entry Application",
            font=("TkDefaultFont", 16)
        ).grid(row=0)
        self.recordform = v.DataRecordForm(self, self.model, self.settings)
        self.recordform.grid(row=1, padx=10, sticky=(tk.W + tk.E))
        self.recordform.bind('<<SaveRecord>>', self._on_save)
        # status bar
        self.status = tk.StringVar()
        self.statusbar = ttk.Label(self, textvariable=self.status)
        self.statusbar.grid(sticky=(tk.W + tk.E), row=3, padx=10)
        # Count of records saved in this session, shown in the status bar.
        self._records_saved = 0
    def _on_save(self, *_):
        """Handles file-save requests"""
        # Check for errors first
        errors = self.recordform.get_errors()
        if errors:
            self.status.set(
                "Cannot save, error in fields: {}"
                .format(', '.join(errors.keys()))
            )
            message = "Cannot save record"
            detail = "The following fields have errors: \n * {}".format(
                '\n * '.join(errors.keys())
            )
            messagebox.showerror(
                title='Error',
                message=message,
                detail=detail
            )
            return False
        data = self.recordform.get()
        self.model.save_record(data)
        self._records_saved += 1
        self.status.set(
            "{} records saved this session".format(self._records_saved)
        )
        self.recordform.reset()
    def _on_file_select(self, *_):
        """Handle the file->select action"""
        filename = filedialog.asksaveasfilename(
            title='Select the target file for saving records',
            defaultextension='.csv',
            filetypes=[('CSV', '*.csv *.CSV')]
        )
        if filename:
            # Swap in a model targeting the chosen file for future saves.
            self.model = m.CSVModel(filename=filename)
    @staticmethod
    def _simple_login(username, password):
        """A basic authentication backend with a hardcoded user and password"""
        # NOTE(review): plaintext hardcoded credentials -- acceptable for a
        # teaching example only, not for production.
        return username == 'abq' and password == 'Flowers'
    def _show_login(self):
        """Show login dialog and attempt to login"""
        error = ''
        title = "Login to ABQ Data Entry"
        while True:
            login = v.LoginDialog(self, title, error)
            if not login.result:  # User canceled
                return False
            username, password = login.result
            if self._simple_login(username, password):
                return True
            error = 'Login Failed'  # loop and redisplay
    def _load_settings(self):
        """Load settings into our self.settings dict."""
        vartypes = {
            'bool': tk.BooleanVar,
            'str': tk.StringVar,
            'int': tk.IntVar,
            'float': tk.DoubleVar
        }
        # create our dict of settings variables from the model's settings.
        self.settings = dict()
        for key, data in self.settings_model.fields.items():
            vartype = vartypes.get(data['type'], tk.StringVar)
            self.settings[key] = vartype(value=data['value'])
        # put a trace on the variables so they get stored when changed.
        for var in self.settings.values():
            var.trace_add('write', self._save_settings)
    def _save_settings(self, *_):
        """Save the current settings to a preferences file"""
        for key, variable in self.settings.items():
            self.settings_model.set(key, variable.get())
        self.settings_model.save()
|
#!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
    # Default to the local settings module unless one is already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # noqa
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # This allows easy placement of apps within the interior
    # bug_tracker_v2 directory.
    current_path = Path(__file__).parent.resolve()
    sys.path.append(str(current_path / "bug_tracker_v2"))
    execute_from_command_line(sys.argv)
|
# -*- coding: utf-8 -*-
#
# Jinja2 documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 27 21:42:41 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'jinjaext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Jinja2'
copyright = '2008, Armin Ronacher'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import pkg_resources
try:
    release = pkg_resources.get_distribution('Jinja2').version
except pkg_resources.DistributionNotFound:
    # Fixed: get_distribution raises DistributionNotFound (not ImportError)
    # when the package metadata is missing, so the old except never fired.
    print('To build the documentation, The distribution information of Jinja2')
    print('Has to be available. Either install the package into your')
    print('development environment or run "setup.py develop" to setup the')
    print('metadata. A virtualenv is recommended!')
    sys.exit(1)
if 'dev' in release:
    # Normalize dev releases to a plain '<base>dev' string.
    release = release.split('dev')[0] + 'dev'
version = '.'.join(release.split('.')[:2])
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'jinjaext.JinjaStyle'
# Options for HTML output
# -----------------------
html_theme = 'jinja'
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# no modindex
html_use_modindex = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.
#html_use_opensearch = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'Jinja2doc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('latexindex', 'Jinja2.tex', 'Jinja2 Documentation', 'Armin Ronacher',
'manual'),
]
# Additional stuff for LaTeX
latex_elements = {
'fontpkg': r'\usepackage{mathpazo}',
'papersize': 'a4paper',
'pointsize': '12pt',
'preamble': r'''
\usepackage{jinjastyle}
% i hate you latex
\DeclareUnicodeCharacter{14D}{o}
'''
}
latex_use_parts = True
latex_additional_files = ['jinjastyle.sty', 'logo.pdf']
# If false, no module index is generated.
latex_use_modindex = False
html_sidebars = {
'index': ['sidebarlogo.html', 'sidebarintro.html', 'sourcelink.html',
'searchbox.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
|
import keras.backend as K
from keras.layers.convolutional import Conv3D
from keras.layers.core import Activation, Dense, Dropout
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import (AveragePooling3D, GlobalAveragePooling3D, AveragePooling2D)
from keras.regularizers import l2
from keras.layers import Input, Flatten
from keras.models import Model
def __create_Conv3D_net(simple,
                        width,
                        height,
                        frames,
                        nb_classes,
                        weight_decay=1e-4,
                        activation='softmax',
                        # attention=True,
                        ):
    """Build a small 3D-CNN classifier with the Keras functional API.

    Four Conv3D + ReLU + AveragePooling3D stages (16, 16, 32 and 2 filters),
    followed by dropout, flatten and a Dense classification head.

    Parameters:
        simple: used only in the `input_shape` hint below.
            NOTE(review): `input_shape` has no effect when a layer is applied
            to an existing tensor, so this argument appears to be effectively
            unused -- confirm before removing.
        width, height, frames: dimensions of the input volume; the model
            input tensor is (height, width, frames, 1), channels last.
        nb_classes: number of units of the final Dense layer.
        weight_decay: L2 regularization factor applied to every Conv3D kernel.
        activation: activation of the classification head (default 'softmax').

    Returns:
        A keras.models.Model mapping the input volume to class scores.
    """
    # If the channel axis came first this value would be 1; otherwise (the
    # TensorFlow default, used here) the channel axis is last (-1).
    # (Translated from the original Chinese comment.)
    DInput = Input([height, width, frames, 1])
    x = Conv3D(filters=16, kernel_size=(3, 3, 3), padding='same',
               strides=(1, 1, 1), kernel_regularizer=l2(weight_decay), data_format='channels_last',
               input_shape=(simple, frames, width, height, 1))(DInput)
    x = Activation('relu')(x)
    x = AveragePooling3D()(x)
    #
    x = Conv3D(16, (3, 3, 3), padding='same',
               strides=(1, 1, 1), kernel_regularizer=l2(weight_decay), data_format='channels_last')(x)
    x = Activation('relu')(x)
    x = AveragePooling3D()(x)
    x = Conv3D(32, (3, 3, 3), padding='same',
               strides=(1, 1, 1), kernel_regularizer=l2(weight_decay), data_format='channels_last')(x)
    x = Activation('relu')(x)
    x = AveragePooling3D()(x)
    # Final conv reduces to 2 feature maps before pooling, dropout, flatten.
    x = Conv3D(2, (3, 3, 3), padding='same',
               strides=(1, 1, 1), kernel_regularizer=l2(weight_decay), data_format='channels_last')(x)
    x = Activation('relu')(x)
    x = AveragePooling3D()(x)
    x = Dropout(0.5)(x)
    x = Flatten()(x)
    x = Dense(nb_classes, activation=activation)(x)
    model = Model(DInput, x)
    return model
|
#===============================================================================
# Imports
#===============================================================================
import os
import re
import sys
import stat
import base64
import inspect
import subprocess
from abc import (
abstractproperty,
)
from os.path import (
isdir,
isfile,
abspath,
dirname,
basename,
expanduser,
expandvars,
)
from textwrap import dedent
try:
from ConfigParser import (
NoOptionError,
NoSectionError,
RawConfigParser,
)
except ImportError:
from configparser import (
NoOptionError,
NoSectionError,
RawConfigParser,
)
from ctk.path import (
join_path,
)
from ctk.util import (
memoize,
iterable,
classproperty,
Dict,
Options,
)
#===============================================================================
# Globals
#===============================================================================
# Process-wide singletons populated at runtime (see get_config(),
# set_config_class() and the Config constructor below).
CONFIG = None
CONFIG_CLASS = None
PROGRAM_NAME = None
COMMAND_NAME = None
COMMAND_MODULES = None
# Directory layout derived from this file's location: PATH is the package
# directory, NAMESPACE its basename, and the remaining entries are the
# conventional sibling directories of the enclosing checkout.
PATH = dirname(abspath(__file__))
NAMESPACE = basename(PATH)
LIB_DIR = join_path(PATH, '..')
BIN_DIR = join_path(LIB_DIR, '../bin')
CONF_DIR = join_path(LIB_DIR, '../conf')
LOGS_DIR = join_path(LIB_DIR, '../logs')
DATA_DIR = join_path(LIB_DIR, '../data')
#fixme: revisit these assertions
#assert LIB_DIR.endswith('lib'), LIB_DIR
#
#for d in (LIB_DIR, BIN_DIR, CONF_DIR, LOGS_DIR):
#    assert isdir(d), d
# HOSTFQDN may have the FQDN or it may not; HOSTNAME will always be the
# shortest representation of the hostname.
# BUG FIX: the original sliced off exactly len(os.linesep) characters (which
# chops a real character when the command ends with a bare '\n' on a platform
# whose linesep is '\r\n') and detected Python 3 by catching the TypeError
# raised from bytes.split('.').  Decode and strip explicitly instead; on
# Python 2 check_output already returns str and no decode is needed.
HOSTFQDN = subprocess.check_output('hostname')
if not isinstance(HOSTFQDN, str):
    HOSTFQDN = HOSTFQDN.decode('utf-8')
HOSTFQDN = HOSTFQDN.strip().lower()
HOSTNAME = HOSTFQDN.split('.')[0]
#===============================================================================
# Exceptions
#===============================================================================
class ConfigError(Exception):
    """Generic configuration error.

    BUG FIX: these exception classes previously derived from BaseException,
    which escapes ``except Exception`` handlers; PEP 352 reserves
    BaseException for interpreter-exit signals (KeyboardInterrupt,
    SystemExit).  Deriving from Exception keeps every ``except ConfigError``
    (etc.) caller working while behaving like a normal error.
    """
    pass
class NoConfigObjectCreated(Exception):
    """Raised by get_config() before any Config object has been constructed."""
    pass
class ConfigObjectAlreadyCreated(Exception):
    """Raised when a second Config object is constructed in one process."""
    pass
class ConfigClassAlreadySet(Exception):
    """Raised by set_config_class() when a config class is already registered."""
    pass
#===============================================================================
# Helpers
#===============================================================================
def get_config():
    """Return the process-wide Config instance.

    Raises:
        NoConfigObjectCreated: when no Config object has been constructed yet.
    """
    # Reading a module global requires no `global` declaration.
    if not CONFIG:
        raise NoConfigObjectCreated()
    return CONFIG
def _clear_config_if_already_created():
    """Drop the current CONFIG singleton, if any."""
    global CONFIG
    # Unconditional reset: CONFIG is either None already or a Config
    # instance, so the end state is identical to the guarded version.
    CONFIG = None
def set_config_class(cls):
    """Register the Config subclass this process should use.

    May only be called once; raises ConfigClassAlreadySet on a second call.
    """
    assert cls
    global CONFIG_CLASS
    if CONFIG_CLASS:
        raise ConfigClassAlreadySet()
    CONFIG_CLASS = cls
def _clear_config_class_if_already_set():
    """Drop any registered config class (companion to set_config_class)."""
    global CONFIG_CLASS
    # Unconditional reset yields the same end state as the guarded original.
    CONFIG_CLASS = None
#===============================================================================
# Classes
#===============================================================================
class Config(RawConfigParser):
    """INI-backed configuration object; a process-wide singleton.

    Constructing an instance registers it in the module-level CONFIG global
    and raises ConfigObjectAlreadyCreated on a second instantiation.  Actual
    option values are read later via load(), which discovers the config
    files for this class and all ancestor Config subclasses.
    """
    def __init__(self, options=None):
        RawConfigParser.__init__(self)
        # Identity transform: keep option names case-sensitive instead of
        # ConfigParser's default lower-casing.
        self.optionxform = str
        self.options = options if options else Options()
        # Host names computed at import time (module globals).
        self.hostname = HOSTFQDN
        self.shortname = HOSTNAME
        # Populated by load(): the list of config files read, and the
        # explicit filename passed in (if any).
        self.files = None
        self.filename = None
        self._is_production = None
        # NOTE(review): compiled but not used in this class as shown;
        # presumably used by subclasses -- confirm before removing.
        self._multiline_pattern = re.compile(r'([^\s].*?)([\s]+\\)?')
        # Lazily-resolved cache backing the gnuwin32_dir property.
        self.__gnuwin32_dir = None
        global CONFIG
        if CONFIG is not None:
            raise ConfigObjectAlreadyCreated()
        CONFIG = self
    @classproperty
    @classmethod
    def namespace(cls):
        # Package name = directory containing the (sub)class's source file.
        return basename(dirname(inspect.getsourcefile(cls)))
    @classmethod
    def _resolve_dir(cls, name):
        # Resolve <checkout root>/<name> relative to the class's source
        # file (two directory levels up from the module).
        path = inspect.getsourcefile(cls)
        base = dirname(join_path(path, '../..'))
        return join_path(base, name)
    @classproperty
    @classmethod
    def lib_dir(cls):
        return cls._resolve_dir('lib')
    @classproperty
    @classmethod
    def bin_dir(cls):
        return cls._resolve_dir('bin')
    @classproperty
    @classmethod
    def conf_dir(cls):
        return cls._resolve_dir('conf')
    @classproperty
    @classmethod
    def logs_dir(cls):
        return cls._resolve_dir('logs')
    @classproperty
    @classmethod
    def data_dir(cls):
        # Unlike the other directories, data/ is created on demand.
        d = cls._resolve_dir('data')
        if not isdir(d):
            os.makedirs(d)
        return d
    @classproperty
    @classmethod
    def parent(cls):
        # Immediate base class (used to walk the subclass chain in
        # discover_config_files()), or None when unavailable.
        try:
            return cls.__base__
        except AttributeError:
            return None
    def _absdir(self, name, section='main'):
        """Return option `section`.`name` as an absolute path, repeatedly
        expanding ~user and $VAR/%VAR% references until a pass changes
        nothing.

        `count` reaches 2 only on a pass where neither expansion applied;
        `max_total` caps the number of passes to guard against
        self-referencing values.
        """
        count = 0
        total = 0
        max_total = 10
        p = self.get(section, name)
        while count != 2 and total < max_total:
            count = 0
            total += 1
            if '~' in p:
                p = expanduser(p)
                count -= 1
            else:
                count += 1
            if '%' in p or '$' in p:
                p = expandvars(p)
                count -= 1
            else:
                count += 1
        if total == max_total:
            args = (section, name)
            msg = "Exceeded user/var path recursion depth for %s.%s." % args
            raise RuntimeError(msg)
        return abspath(p)
    @property
    def gnuwin32_dir(self):
        # Lazily resolve and validate the configured gnuwin32 directory.
        if not self.__gnuwin32_dir:
            d = self._absdir('gnuwin32_dir')
            assert isdir(d), d
            self.__gnuwin32_dir = d
        return self.__gnuwin32_dir
    @property
    def gnuwin32_bin(self):
        return join_path(self.gnuwin32_dir, 'bin')
    def get_gnuwin32_exe(self, name):
        # Append '.exe' when missing and return the path under gnuwin32/bin.
        if not name.endswith('.exe'):
            name = name + '.exe'
        return join_path(self.gnuwin32_bin, name)
    def get_gnu_exe(self, name):
        # On Windows, map a GNU tool name to its gnuwin32 executable; on
        # other platforms the bare name is assumed to be on PATH.
        if os.name != 'nt':
            return name
        else:
            return self.get_gnuwin32_exe(name)
    @classmethod
    def discover_config_files(cls, files=None):
        """Accumulate (conf_dir, namespace) pairs for this class and every
        ancestor Config subclass into `files`, which is mutated in place.

        NOTE(review): when called without `files` the list is created here
        but never returned, so the results are lost; load() always passes
        a list, which is the only in-file caller.
        """
        if files is None:
            assert cls.parent
            files = list()
        files.append((cls.conf_dir, cls.namespace))
        try:
            if cls.parent:
                cls.parent.discover_config_files(files)
        except AttributeError:
            # Reached a base class without the classproperty machinery.
            pass
    @property
    @memoize
    def sqlalchemy_ideal_chunk_size(self):
        return self.getint('sqlalchemy', 'ideal_chunk_size')
    @property
    @memoize
    def sqlalchemy_min_chunk_size(self):
        return self.getint('sqlalchemy', 'min_chunk_size')
    def post_load(self):
        """
        Called after load() has run. Implement in subclass to prime
        additional attributes/settings once config is available.
        """
        pass
    def load(self, filename=None):
        """Discover and read the config files for this class hierarchy.

        For each (conf_dir, namespace) pair the candidates are, in order:
        <namespace>.conf, <namespace>-<fqdn>.conf, <namespace>-<short>.conf
        (only when the short name differs) and $<NAMESPACE>_CONF if set.
        An explicit `filename` argument is read last, so its values win.
        """
        info = []
        self.discover_config_files(info)
        files = []
        for (conf_dir, namespace) in info:
            # e.g. '<conf_dir>/<namespace>%s.conf', with %s filled per host.
            prefix = join_path(conf_dir, '%s%s.conf' % (namespace, '%s'))
            short = (self.hostname != self.shortname)
            upper = namespace.upper()
            files += [
                f for f in [
                    prefix % '',
                    prefix % ('-' + self.hostname),
                    prefix % ('-' + self.shortname) if short else None,
                    os.environ.get('%s_CONF' % upper) or None,
                ] if f
            ]
        if filename:
            files.append(filename)
        # The first (base) file must exist; the remaining ones are optional
        # and read() silently skips any that are missing.
        with open(files[0], 'r') as f:
            self.readfp(f, files[0])
        self.read(files[1:])
        self.files = files
        self.filename = filename
        self.post_load()
# vim:set ts=8 sw=4 sts=4 tw=78 et:
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Glance Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# -- General configuration --------------------------------------------------

# Sphinx extensions: the OpenStack docs theme plus reno's release-note
# directive support.
extensions = [
    'openstackdocstheme',
    'reno.sphinxext',
]

# Template locations, source-file suffix, and the master toctree document.
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# -- Project identity -------------------------------------------------------
project = u'sushy Release Notes'
copyright = u'2016, OpenStack Foundation'

# Release-notes builds are intentionally unversioned: both the full release
# string and the short X.Y version are left empty.
release = ''
version = ''
# Patterns, relative to the source directory, of files and directories to
# skip when looking for sources (none for this build).
exclude_patterns = []

# Pygments (syntax highlighting) style.
pygments_style = 'sphinx'

# -- Options for HTML output ------------------------------------------------

# Theme for HTML and HTML Help pages.
html_theme = 'openstackdocs'
# Custom static files (e.g. style sheets); copied after the builtin static
# files, so a file named "default.css" overrides the builtin one.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'SushyReleaseNotesdoc'
# -- Options for LaTeX output -----------------------------------------------

# No LaTeX overrides: Sphinx defaults for paper size, point size and
# preamble are used as-is.
latex_elements = {}

# One LaTeX document built from the root doc:
# (source start file, target name, title, author, document class).
latex_documents = [
    ('index', 'SushyReleaseNotes.tex', u'Sushy Release Notes Documentation',
     u'Ironic Developers', 'manual'),
]

# -- Options for manual page output -----------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'sushyreleasenotes', u'Sushy Release Notes Documentation',
     [u'Ironic Developers'], 1)
]

# -- Options for Texinfo output ---------------------------------------------

# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    ('index', 'SushyReleaseNotes', u'Sushy Release Notes Documentation',
     u'Ironic Developers', 'SushyReleaseNotes',
     'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Internationalization output --------------------------------
locale_dirs = ['locale/']
|
# -*- coding: utf-8 -*-
"""
pagarmeapisdk
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
from pagarmeapisdk.models.update_price_bracket_request import UpdatePriceBracketRequest
class UpdatePricingSchemeRequest(object):
    """Implementation of the 'UpdatePricingSchemeRequest' model.

    Request for updating a pricing scheme.

    Attributes:
        scheme_type (string): Scheme type
        price_brackets (list of UpdatePriceBracketRequest): Price brackets
        price (int): Price
        minimum_price (int): Minimum price
        percentage (float): percentual value used in pricing_scheme Percent
    """

    # Mapping from model property names to API (wire) property names.
    # The two vocabularies coincide for this model.
    _names = {
        "scheme_type": 'scheme_type',
        "price_brackets": 'price_brackets',
        "price": 'price',
        "minimum_price": 'minimum_price',
        "percentage": 'percentage'
    }

    def __init__(self, scheme_type=None, price_brackets=None, price=None,
                 minimum_price=None, percentage=None):
        """Constructor for the UpdatePricingSchemeRequest class"""
        self.scheme_type = scheme_type
        self.price_brackets = price_brackets
        self.price = price
        self.minimum_price = minimum_price
        self.percentage = percentage

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        # Price brackets are themselves model objects; deserialize each
        # element when the key is present and non-null.
        brackets = dictionary.get('price_brackets')
        if brackets is not None:
            brackets = [
                UpdatePriceBracketRequest.from_dictionary(item)
                for item in brackets
            ]
        return cls(dictionary.get('scheme_type'),
                   brackets,
                   dictionary.get('price'),
                   dictionary.get('minimum_price'),
                   dictionary.get('percentage'))
|
from cnas.evaluation.core.config import args
from cnas.evaluation.eval import eval_entry
# Script entry point: run the evaluation pipeline with the CLI-parsed args.
if __name__ == "__main__":
    eval_entry(args)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-08 07:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a boolean `is_favorite` flag (default False) to the Song model."""
    # Must run after the initial schema migration of the `music` app.
    dependencies = [
        ('music', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='song',
            name='is_favorite',
            field=models.BooleanField(default=False),
        ),
    ]
|
import cmiles
import fragmenter
import json
# Build CMILES identifiers for ethylene glycol and its canonically mapped
# SMILES (explicit hydrogens, atom-map indices).
mol_id = cmiles.get_molecule_ids('OCCO', strict=False)
mapped_smiles = (mol_id['canonical_isomeric_explicit_hydrogen_mapped_smiles'])
mol = cmiles.utils.load_molecule(mapped_smiles)
# Locate rotatable torsions; use one internal and one terminal torsion.
torsions = fragmenter.torsions.find_torsions(mol)
dihedrals_list = [torsions['internal']['torsion_0'], torsions['terminal']['torsion_0']]
# One conformer plus a grid of conformers scanning the two dihedrals at
# 90- and 120-degree intervals respectively.
single_conformer = fragmenter.chemi.generate_conformers(mol, max_confs=1)
mult_conformers_grid = fragmenter.chemi.generate_grid_conformers(mol, dihedrals=dihedrals_list, intervals=[90, 120])
# Convert conformers to QCSchema molecules, atom-ordered by the CMILES map.
qm_mol_single_conf = cmiles.utils.mol_to_map_ordered_qcschema(single_conformer, mapped_smiles)
qm_mol_mult_conf = [cmiles.utils.mol_to_map_ordered_qcschema(conf, mapped_smiles) for conf in mult_conformers_grid.GetConfs()]
# Job indices: canonical torsion label(s), then a '_<n conformers>' suffix.
job_index_1d = cmiles.utils.to_canonical_label(mapped_smiles, dihedrals_list[0])
job_index_2d = job_index_1d + ',' + cmiles.utils.to_canonical_label(mapped_smiles , dihedrals_list[1])
job_index_1d_mult = job_index_1d + '_' + str(len(qm_mol_mult_conf))
job_index_2d_mult = job_index_2d + '_' + str(len(qm_mol_mult_conf))
job_index_1d = job_index_1d + '_1'
job_index_2d = job_index_2d + '_1'
# Four torsion-drive jobs: 1D and 2D scans, each started from the single
# conformer and from the multi-conformer grid. Grid spacing is 15 degrees.
torsion_drive_inputs = {
    job_index_1d: {
        'dihedral': [dihedrals_list[0]],
        'grid': [15],
        'input_molecules': qm_mol_single_conf,
        'cmiles_identifiers': mol_id
    },
    job_index_2d: {
        'dihedral': dihedrals_list,
        'grid': [15, 15],
        'input_molecules': qm_mol_single_conf,
        'cmiles_identifiers': mol_id
    },
    job_index_1d_mult: {
        'dihedral': [dihedrals_list[0]],
        'grid': [15],
        'input_molecules': qm_mol_mult_conf,
        'cmiles_identifiers': mol_id
    },
    job_index_2d_mult: {
        'dihedral': dihedrals_list,
        'grid': [15, 15],
        'input_molecules': qm_mol_mult_conf,
        'cmiles_identifiers': mol_id
    }
}
# Persist the job specification for the torsion-drive runner.
with open('torsiondrive_inputs.json', 'w') as f:
    json.dump(torsion_drive_inputs, f, indent=2, sort_keys=True)
|
import os
import logging
import urllib.request
import requests
import re
import io
import us
import zipfile
import json
from datetime import datetime
from functools import lru_cache
from enum import Enum
import pandas as pd
import numpy as np
from covidactnow.datapublic.common_fields import CommonFields
from libs.datasets import combined_datasets
from libs.datasets.timeseries import TimeseriesDataset
from libs.datasets.dataset_utils import AggregationLevel
from pyseir.utils import get_run_artifact_path, RunArtifact, ewma_smoothing
# Module-level logger.
log = logging.getLogger(__name__)
# On-disk cache directory for downloaded datasets, resolved relative to
# this file: <parent of this package>/pyseir_data.
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "pyseir_data")
class HospitalizationCategory(Enum):
    """Kind of hospitalization series being requested."""

    HOSPITALIZED = "hospitalized"
    ICU = "icu"

    def __str__(self):
        # Render as the bare value ("hospitalized" / "icu") instead of the
        # default "HospitalizationCategory.ICU" representation; values are
        # already strings, so no conversion is needed.
        return self.value
class HospitalizationDataType(Enum):
    # Distinguishes sources that report a running total of hospitalizations
    # from those that report the current hospitalized census.
    CUMULATIVE_HOSPITALIZATIONS = "cumulative_hospitalizations"
    CURRENT_HOSPITALIZATIONS = "current_hospitalizations"
def load_zip_get_file(url, file, decoder="utf-8"):
    """
    Load a zipfile from a URL and extract a single file. Note that this is
    not ideal and may fail for large files since the files must fit in memory.

    Parameters
    ----------
    url: str
        URL to read from.
    file: str
        Filename to pull out of the zipfile.
    decoder: str
        Usually None for raw bytes or 'utf-8', or 'latin1'

    Returns
    -------
    file_buffer: io.BytesIO or io.StringIO
        The file buffer for the requested file if decoder is None else return
        a decoded StringIO.
    """
    # Context managers close the URL response and the ZipFile handle
    # deterministically; the original leaked both.
    with urllib.request.urlopen(url) as remote:
        zipinmemory = io.BytesIO(remote.read())
    with zipfile.ZipFile(zipinmemory) as zf:
        byte_string = zf.read(file)
    if decoder:
        return io.StringIO(byte_string.decode(decoder))
    return io.BytesIO(byte_string)
def cache_mobility_data():
    """Download Descartes Labs county mobility data and cache it locally.

    Pulled from https://github.com/descarteslabs/DL-COVID-19; writes one
    pickle per metric (m50 and m50_index) into DATA_DIR, each keyed by
    county fips with the per-day values collected into an array.
    """
    log.info("Downloading mobility data.")
    url = "https://raw.githubusercontent.com/descarteslabs/DL-COVID-19/master/DL-us-mobility-daterow.csv"
    column_types = {
        "country_code": str,
        "admin_level": int,
        "admin1": str,
        "admin2": str,
        "fips": str,
        "samples": int,
        "m50": float,
        "m50_index": float,
    }
    raw = pd.read_csv(filepath_or_buffer=url, parse_dates=["date"], dtype=column_types)
    # admin_level == 2 rows are counties.
    county_rows = raw.query("admin_level == 2")
    for metric, filename in (("m50", "mobility_data__m50.pkl"),
                             ("m50_index", "mobility_data__m50_index.pkl")):
        per_county = (
            county_rows[["fips", "date", metric]]
            .groupby("fips")
            .agg(list)
            .reset_index()
        )
        per_county[metric] = per_county[metric].apply(lambda values: np.array(values))
        per_county.to_pickle(os.path.join(DATA_DIR, filename))
def cache_public_implementations_data():
    """Download county-level public-intervention dates and cache as pickle.

    Pulled from https://github.com/JieYingWu/COVID-19_US_County-level_Summaries
    """
    log.info("Downloading public implementations data")
    url = "https://raw.githubusercontent.com/JieYingWu/COVID-19_US_County-level_Summaries/master/raw_data/national/public_implementations_fips.csv"
    csv_text = requests.get(url, verify=True).content.decode("utf-8")
    # The raw dates lack a year; append one. NOTE: This assumes the year 2020.
    csv_text = re.sub(r",(\d+)-(\w+)", r",\1-\2-2020", csv_text)
    date_cols = [
        "stay at home",
        ">50 gatherings",
        ">500 gatherings",
        "public schools",
        "restaurant dine-in",
        "entertainment/gym",
        "Federal guidelines",
        "foreign travel ban",
    ]
    frame = pd.read_csv(io.StringIO(csv_text), parse_dates=date_cols, dtype="str")
    frame = frame.drop(["Unnamed: 1", "Unnamed: 2"], axis=1)
    # Normalize column names: strip '>', spaces/slashes -> underscores, lower.
    frame.columns = [
        name.replace(">", "").replace(" ", "_").replace("/", "_").lower()
        for name in frame.columns
    ]
    # Zero-pad fips codes to the canonical 5 digits.
    frame.fips = frame.fips.apply(lambda code: code.zfill(5))
    frame.to_pickle(os.path.join(DATA_DIR, "public_implementations_data.pkl"))
@lru_cache(maxsize=32)
def load_county_metadata():
    """
    Return county level metadata such as age distributions, populations etc..

    Returns
    -------
    : pd.DataFrame
    """
    metadata = pd.read_json(
        os.path.join(DATA_DIR, "county_metadata.json"), dtype={"fips": "str"}
    )
    # Overwrite the "state" column with the full state name derived from the
    # first two digits of each fips code.
    metadata.loc[:, "state"] = metadata["fips"].apply(
        lambda fips: us.states.lookup(fips[:2]).name
    )
    return metadata
@lru_cache(maxsize=32)
def load_county_metadata_by_state(state=None):
    """
    Generate a dataframe that contains county metadata aggregated at state
    level.

    Parameters
    ----------
    state: str or list(str)
        Name of state(s) to load the metadata for; all states when None.

    Returns
    -------
    state_metadata: pd.DataFrame
        One row per state: population-weighted density measures, summed
        population, element-wise summed age distribution, shared age bin
        edges, and the list of county fips codes.
    """
    # aggregate into state level metadata
    state_metadata = load_county_metadata()
    if state is not None:
        state = [state] if isinstance(state, str) else list(state)
    else:
        state = state_metadata["state"].unique()
    state = [s.title() for s in state]
    # BUG FIX: take an explicit copy. The boolean-mask selection may be
    # view-backed; the in-place .loc writes below previously triggered
    # pandas' SettingWithCopyWarning and risked silently mutating the
    # frame cached by load_county_metadata().
    state_metadata = state_metadata[state_metadata.state.isin(state)].copy()
    # Convert per-capita densities to population-weighted totals so they can
    # be summed across counties, then renormalized to densities below.
    density_measures = ["housing_density", "population_density"]
    for col in density_measures:
        state_metadata.loc[:, col] = state_metadata[col] * state_metadata["total_population"]
    # Element-wise sum of the per-county age histograms.
    age_dist = state_metadata.groupby("state")["age_distribution"].apply(
        lambda l: np.stack(np.array(l)).sum(axis=0)
    )
    density_info = state_metadata.groupby("state").agg(
        {
            "population_density": lambda x: sum(x),
            "housing_density": lambda x: sum(x),
            "total_population": lambda x: sum(x),
            "fips": list,
        }
    )
    # Age bin edges are identical across counties; keep the first per state.
    age_bins = state_metadata[["state", "age_bin_edges"]].groupby("state").first()
    state_metadata = pd.concat([age_dist, density_info, age_bins], axis=1)
    # Back from weighted totals to per-capita densities.
    for col in density_measures:
        state_metadata[col] /= state_metadata["total_population"]
    return state_metadata
@lru_cache(maxsize=32)
def load_ensemble_results(fips):
    """
    Retrieve ensemble results for a given state or county fips code.

    Parameters
    ----------
    fips: str
        State or county FIPS to load.

    Returns
    -------
    ensemble_results: dict or None
        Parsed JSON payload, or None when no result file exists yet.
    """
    path = get_run_artifact_path(fips, RunArtifact.ENSEMBLE_RESULT)
    if not os.path.exists(path):
        return None
    with open(path) as fh:
        return json.load(fh)
@lru_cache(maxsize=32)
def load_county_metadata_by_fips(fips):
    """
    Generate a dictionary for a county which includes county metadata.

    Parameters
    ----------
    fips: str

    Returns
    -------
    county_metadata: dict
        Dictionary of metadata for the county. The keys are:
        ['state', 'county', 'total_population', 'population_density',
         'housing_density', 'age_distribution', 'age_bin_edges']
    """
    row_as_dict = load_county_metadata().set_index("fips").loc[fips].to_dict()
    # Normalize numeric scalars (e.g. numpy types) to plain floats; strings
    # and array-valued entries pass through untouched.
    return {
        key: float(value)
        if np.isscalar(value) and not isinstance(value, str)
        else value
        for key, value in row_as_dict.items()
    }
@lru_cache(maxsize=32)
def get_all_fips_codes_for_a_state(state: str):
    """Returns a list of fips codes for a state

    Arguments:
        state {str} -- the full state name (matched case-insensitively)

    Returns:
        fips [list] -- a list of fips codes for a state
    """
    metadata = load_county_metadata()
    matches_state = metadata["state"].str.lower() == state.lower()
    return metadata[matches_state].fips
@lru_cache(maxsize=32)
def load_new_case_data_by_fips(
    fips, t0, include_testing_correction=False, testing_correction_smoothing_tau=5
):
    """
    Get data for new cases.

    Parameters
    ----------
    fips: str
        County fips to lookup.
    t0: datetime
        Datetime to offset by.
    include_testing_correction: bool
        If True, include a correction for newly expanded or decreased test
        coverage.
    testing_correction_smoothing_tau: float
        expected_positives_from_test_increase is smoothed based on an
        exponentially weighted moving average of decay factor specified here.

    Returns
    -------
    times: array(float)
        List of float days since t0 for the case and death counts below
    observed_new_cases: array(int)
        Array of new cases observed each day.
    observed_new_deaths: array(int)
        Array of new deaths observed each day.
    """
    county_case_timeseries = combined_datasets.get_timeseries_for_fips(
        fips, columns=[CommonFields.CASES, CommonFields.DEATHS], min_range_with_some_value=True
    )
    county_case_data = county_case_timeseries.data
    # Day offsets relative to t0; the first row is dropped so the series
    # lines up with the day-over-day differences computed below.
    times_new = (county_case_data["date"] - t0).dt.days.iloc[1:]
    # Daily new cases = first difference of the cumulative case counts.
    observed_new_cases = (
        county_case_data["cases"].values[1:] - county_case_data["cases"].values[:-1]
    )
    if include_testing_correction:
        # Subtract the estimated positives attributable purely to increased
        # testing volume; days without an estimate get no correction.
        df_new_tests = load_new_test_data_by_fips(
            fips, t0, smoothing_tau=testing_correction_smoothing_tau
        )
        df_cases = pd.DataFrame({"times": times_new, "new_cases": observed_new_cases})
        df_cases = df_cases.merge(df_new_tests, how="left", on="times")
        df_cases["new_cases"] -= df_cases["expected_positives_from_test_increase"].fillna(0)
        observed_new_cases = df_cases["new_cases"].values
    # Daily new deaths = first difference of the cumulative death counts.
    observed_new_deaths = (
        county_case_data["deaths"].values[1:] - county_case_data["deaths"].values[:-1]
    )
    # Clip because there are sometimes negatives either due to data reporting or
    # corrections in case count. These are always tiny so we just make
    # downstream easier to work with by clipping.
    return times_new, observed_new_cases.clip(min=0), observed_new_deaths.clip(min=0)
@lru_cache(maxsize=32)
def load_new_case_data_by_state(
    state, t0, include_testing_correction=False, testing_correction_smoothing_tau=5
):
    """
    Load daily new-case and new-death series at state level.

    Parameters
    ----------
    state: str
        State full name.
    t0: datetime
        Datetime to offset by.
    include_testing_correction: bool
        If True, subtract the estimated positives attributable to expanded
        (or decreased) test coverage.
    testing_correction_smoothing_tau: float
        expected_positives_from_test_increase is smoothed based on an
        exponentially weighted moving average of decay factor specified here.

    Returns
    -------
    times: array(float)
        List of float days since t0 for the case and death counts below
    observed_new_cases: array(int)
        Array of new cases observed each day.
    observed_new_deaths: array(int)
        Array of new deaths observed each day.
    """
    abbr = us.states.lookup(state).abbr
    state_df = combined_datasets.get_timeseries_for_state(
        abbr,
        columns=[CommonFields.CASES, CommonFields.DEATHS],
        min_range_with_some_value=True,
    ).data
    # Diff of cumulative counts drops the first observation; align times too.
    times_new = (state_df[CommonFields.DATE] - t0).dt.days.iloc[1:]
    new_cases = np.diff(state_df[CommonFields.CASES].values)
    if include_testing_correction:
        test_df = load_new_test_data_by_fips(
            us.states.lookup(state).fips, t0, smoothing_tau=testing_correction_smoothing_tau
        )
        corrected = pd.DataFrame({"times": times_new, "new_cases": new_cases})
        corrected = corrected.merge(test_df, how="left", on="times")
        corrected["new_cases"] -= corrected["expected_positives_from_test_increase"].fillna(0)
        new_cases = corrected["new_cases"].values
    new_deaths = np.diff(state_df[CommonFields.DEATHS].values)
    # Clip tiny reporting-correction negatives to zero for downstream ease.
    return (times_new, np.array(new_cases).clip(min=0), new_deaths.clip(min=0))
def get_hospitalization_data():
    """
    Build the US timeseries and keep only rows that carry hospitalization
    data. Since we're using this data for hospitalized data only, rows
    without it are dropped; as the use cases of this data source expand we
    may not want to drop. For context, as of 4/8 607/1821 rows contained
    hospitalization data.

    Returns
    -------
    TimeseriesDataset
    """
    data = combined_datasets.build_us_timeseries_with_all_fields().data
    has_hospital_data = (
        data[CommonFields.CURRENT_HOSPITALIZED].notnull()
        | data[CommonFields.CUMULATIVE_HOSPITALIZED].notnull()
    )
    return TimeseriesDataset(data[has_hospital_data])
@lru_cache(maxsize=32)
def load_hospitalization_data(
    fips: str,
    t0: datetime,
    category: HospitalizationCategory = HospitalizationCategory.HOSPITALIZED,
):
    """
    Obtain county hospitalization data. We clip because there are sometimes
    negatives either due to data reporting or corrections in case count.
    These are always tiny so we just make downstream easier to work with by
    clipping.

    Parameters
    ----------
    fips: str
        County fips to lookup.
    t0: datetime
        Datetime to offset by.
    category: HospitalizationCategory

    Returns
    -------
    relative_days: array(float)
        List of float days since t0 for the hospitalization data.
    observed_hospitalizations: array(int)
        Current or cumulative counts per day.
    type: HospitalizationDataType
        Specifies cumulative or current hospitalizations.
    """
    current_col = f"current_{category}"
    cumulative_col = f"cumulative_{category}"
    data = get_hospitalization_data().get_data(
        AggregationLevel.COUNTY, country="USA", fips=fips
    )
    if len(data) == 0:
        return None, None, None
    if (data[current_col] > 0).any():
        data = data[data[current_col].notnull()]
        relative_days = (data["date"].dt.date - t0.date()).dt.days.values
        return (
            relative_days,
            data[current_col].values.clip(min=0),
            HospitalizationDataType.CURRENT_HOSPITALIZATIONS,
        )
    if (data[cumulative_col] > 0).any():
        data = data[data[cumulative_col].notnull()]
        relative_days = (data["date"].dt.date - t0.date()).dt.days.values
        cumulative = data[cumulative_col].values.clip(min=0)
        # Smooth out single-day downticks (minor glitches for a few states)
        # by lowering any value that exceeds its successor.
        for idx in range(len(cumulative) - 1):
            if cumulative[idx] > cumulative[idx + 1]:
                cumulative[idx] = cumulative[idx + 1]
        return relative_days, cumulative, HospitalizationDataType.CUMULATIVE_HOSPITALIZATIONS
    return None, None, None
@lru_cache(maxsize=32)
def load_hospitalization_data_by_state(
    state: str,
    t0: datetime,
    category: HospitalizationCategory = HospitalizationCategory.HOSPITALIZED,
):
    """
    Obtain state hospitalization data. We clip because there are sometimes
    negatives either due to data reporting or corrections in case count.
    These are always tiny so we just make downstream easier to work with by
    clipping.

    Parameters
    ----------
    state: str
        State to lookup.
    t0: datetime
        Datetime to offset by.
    category: HospitalizationCategory
        'icu' for just ICU or 'hospitalized' for all ICU + Acute.

    Returns
    -------
    times: array(float) or NoneType
        List of float days since t0 for the hospitalization data.
    observed_hospitalizations: array(int) or NoneType
        Current or cumulative counts per day.
    type: HospitalizationDataType
        Specifies cumulative or current hospitalizations.
    """
    abbr = us.states.lookup(state).abbr
    hospitalization_data = combined_datasets.build_us_timeseries_with_all_fields().get_data(
        AggregationLevel.STATE, country="USA", state=abbr
    )
    if len(hospitalization_data) == 0:
        return None, None, None
    if (hospitalization_data[f"current_{category}"] > 0).any():
        hospitalization_data = hospitalization_data[
            hospitalization_data[f"current_{category}"].notnull()
        ]
        times_new = (hospitalization_data["date"].dt.date - t0.date()).dt.days.values
        return (
            times_new,
            hospitalization_data[f"current_{category}"].values.clip(min=0),
            HospitalizationDataType.CURRENT_HOSPITALIZATIONS,
        )
    elif (hospitalization_data[f"cumulative_{category}"] > 0).any():
        hospitalization_data = hospitalization_data[
            hospitalization_data[f"cumulative_{category}"].notnull()
        ]
        times_new = (hospitalization_data["date"].dt.date - t0.date()).dt.days.values
        cumulative = hospitalization_data[f"cumulative_{category}"].values.clip(min=0)
        # Smooth out single-day downticks (minor glitches for a few states).
        for i in range(len(cumulative) - 1):
            if cumulative[i] > cumulative[i + 1]:
                cumulative[i] = cumulative[i + 1]
        # BUG FIX: previously returned the raw clipped series, silently
        # discarding the monotonicity correction computed above. The
        # county-level loader returns the corrected series; do the same here.
        return times_new, cumulative, HospitalizationDataType.CUMULATIVE_HOSPITALIZATIONS
    else:
        return None, None, None
def get_current_hospitalized(fips, t0, category: HospitalizationCategory):
    """
    Return the current estimate for the number of people in the given
    category for a given fips. Treats a length 2 fips as a state and a
    length 5 fips as a county.

    Parameters
    ----------
    fips: str
        US fips to lookup.
    t0: datetime
        Datetime to offset by.
    category: HospitalizationCategory
        'icu' for just ICU or 'hospitalized' for all ICU + Acute.

    Returns
    -------
    time: float
        Days since t0 for the hospitalization data.
    current estimate: float
        The most recent provided value for the current occupied in the requested category.
    """
    timeseries_df = combined_datasets.get_timeseries_for_fips(fips).data
    return _get_current_hospitalized(timeseries_df, t0, category)
def _get_current_hospitalized(
df: pd.DataFrame, t0: datetime, category: HospitalizationCategory,
):
"""
Given a DataFrame that contains values icu or hospitalization data
for a single county/state, this function returns the latest value.
Parameters
----------
df
dataframe containing either current_ or cumulative_ values for a single county or state
t0
beginning of observation period
category
the type of current data to be returned
Returns
-------
time: float
Days since t0 for the hospitalization data.
current estimate: float
The most recent provided value for the current occupied in the requested category.
"""
# TODO: No need to pass t0 down and back up. Can return a datetime that consumer converts.
NUM_DAYS_LOOKBACK = 3
# Agencies will start and stop reporting values. Also, depending on the time of day some columns
# in a day row may propagate before others. Therefore, we don't want to just take the most
# recent value which may be None, nor take just the most recent any value, which may be weeks
# ago.
# Datetimes are in naive but UTC. Look at possible values that are within this time window.
date_minimum = pd.Timestamp.utcnow().tz_localize(None) - pd.Timedelta(days=NUM_DAYS_LOOKBACK)
date_mask = df["date"] >= date_minimum.to_datetime64()
recent_days_index = df.index[date_mask]
# Look back from most recent and find the first (latest) non-null value. If the loop drops out,
# that means there were no non-null values in the window of interest, and we return Nones.
for idx in reversed(recent_days_index): # Iterate from most recent backwards
if pd.notnull(df[f"current_{category}"][idx]):
current_latest = df[f"current_{category}"][idx]
times_new = df["date"].dt.date - t0.date()
times_new_latest = times_new[idx].days
return times_new_latest, current_latest
else: # No values found in recent window, so return None
return None, None
@lru_cache(maxsize=32)
def load_new_test_data_by_fips(fips, t0, smoothing_tau=5, correction_threshold=5):
    """
    Return a timeseries of new tests for a geography. Note that due to reporting
    discrepancies county to county, and state-to-state, these often do not go
    back as far as case data.

    Parameters
    ----------
    fips: str
        State or county fips code
    t0: datetime
        Reference datetime to use.
    smoothing_tau: int
        expected_positives_from_test_increase is smoothed based on an
        exponentially weighted moving average of decay factor specified here.
    correction_threshold: int
        Do not apply a correction if the incident cases per day is lower than
        this value. There can be instability if case counts are very low.

    Returns
    -------
    df: pd.DataFrame
        DataFrame containing columns:
        - 'date'
        - 'new_tests': Number of total tests performed that day
        - 'increase_in_new_tests': Increase in tests performed that day vs
          previous day
        - 'positivity_rate': Test positivity rate
        - 'expected_positives_from_test_increase': Number of positive
          detections expected just from increased test capacity.
        - 'times': days since t0 for this observation.
    """
    fips_timeseries = combined_datasets.get_timeseries_for_fips(fips)
    df = fips_timeseries.data.copy()
    # Aggregation level is None as fips is unique across aggregation levels.
    df = df.loc[
        (df[CommonFields.POSITIVE_TESTS].notnull())
        & (df[CommonFields.NEGATIVE_TESTS].notnull())
        & ((df[CommonFields.POSITIVE_TESTS] + df[CommonFields.NEGATIVE_TESTS]) > 0),
        :,
    ]
    df["positivity_rate"] = df[CommonFields.POSITIVE_TESTS] / (
        df[CommonFields.POSITIVE_TESTS] + df[CommonFields.NEGATIVE_TESTS]
    )
    df["new_positive"] = np.append([0], np.diff(df[CommonFields.POSITIVE_TESTS]))
    # The first derivative gets us new instead of cumulative tests while the
    # second derivative gives us the change in new test rate.
    df["new_tests"] = np.append(
        [0], np.diff(df[CommonFields.POSITIVE_TESTS] + df[CommonFields.NEGATIVE_TESTS])
    )
    df["increase_in_new_tests"] = np.append([0], np.diff(df["new_tests"]))
    # dPositive / dTotal = 0.65 * positivity_rate was empirically determined by looking at
    # the increase in positives day-over-day relative to the increase in total tests across all 50 states.
    df["expected_positives_from_test_increase"] = (
        df["increase_in_new_tests"] * 0.65 * df["positivity_rate"]
    )
    df = df[
        [
            "date",
            "new_tests",
            "increase_in_new_tests",
            "positivity_rate",
            "expected_positives_from_test_increase",
            "new_positive",
        ]
    ]
    df = df.loc[df.increase_in_new_tests.notnull() & df.positivity_rate.notnull(), :]
    df["expected_positives_from_test_increase"] = ewma_smoothing(
        df["expected_positives_from_test_increase"], smoothing_tau
    )
    # BUG FIX: this threshold was hardcoded to 5, silently ignoring the
    # correction_threshold parameter (same default, so behavior is unchanged
    # for existing callers). Suppress the correction on low-count days.
    df.loc[df["new_positive"] < correction_threshold, "expected_positives_from_test_increase"] = 0
    df["times"] = [
        int((date - t0).days) for date in pd.to_datetime(df["date"].values).to_pydatetime()
    ]
    return df
def load_cdc_hospitalization_data():
    """
    Load age specific hospitalization rates.

    Source: https://www.cdc.gov/mmwr/volumes/69/wr/mm6912e2.htm#T1_down
    The table has columns lower_age, upper_age, mean_age, plus
    lower_/upper_/mean_ for each outcome type:
    - hosp: percentage of all hospitalizations among cases
    - icu: percentage of icu admission among cases
    - hgen: percentage of general hospitalization (all hospitalizations - icu)
    - fatality: case fatality rate
    """
    csv_path = os.path.join(DATA_DIR, "cdc_hospitalization_data.csv")
    return pd.read_csv(csv_path)
@lru_cache(maxsize=1)
def load_mobility_data_m50():
    """
    Load the raw (unnormalized) m50 mobility dataset.

    Returns
    -------
    : pd.DataFrame
    """
    pickle_path = os.path.join(DATA_DIR, "mobility_data__m50.pkl")
    return pd.read_pickle(pickle_path)
@lru_cache(maxsize=1)
def load_mobility_data_m50_index():
    """
    Load the normalized m50 mobility dataset, indexed by fips. Per
    https://github.com/descarteslabs/DL-COVID-19, normal m50 is defined
    over 2020-02-17 to 2020-03-07.

    Returns
    -------
    : pd.DataFrame
    """
    pickle_path = os.path.join(DATA_DIR, "mobility_data__m50_index.pkl")
    return pd.read_pickle(pickle_path).set_index("fips")
@lru_cache(maxsize=1)
def load_public_implementations_data():
    """
    Load the public intervention implementations dataset, indexed by fips.

    Returns
    -------
    : pd.DataFrame
    """
    pickle_path = os.path.join(DATA_DIR, "public_implementations_data.pkl")
    return pd.read_pickle(pickle_path).set_index("fips")
def load_contact_matrix_data_by_fips(fips):
    """
    Load contact matrix for given fips.

    Source: polymod survey in UK
    (https://journals.plos.org/plosmedicine/article?id=10.1371/journal.pmed.0050074).
    Contact matrix at each county has been adjusted by county demographics.

    Parameters
    ----------
    fips: str or iterable of str
        State or county FIPS code(s). All must belong to the same state
        (the file is chosen from the first entry's state prefix).

    Returns
    -------
    : dict
        With fips as keys and values:
        - 'contact_matrix': list(list)
            number of contacts made by age group in rows with age groups in
            columns
        - 'age_bin_edges': list
            lower age limits to define age groups
        - 'age_distribution': list
            population size of each age group
    """
    fips = [fips] if isinstance(fips, str) else list(fips)
    state_abbr = us.states.lookup(fips[0][:2]).abbr
    path = os.path.join(DATA_DIR, "contact_matrix", "contact_matrix_fips_%s.json" % state_abbr)
    # BUG FIX: use a context manager so the file handle is closed promptly;
    # the previous json.loads(open(path).read()) leaked the handle.
    with open(path) as f:
        contact_matrix_data = json.load(f)
    return {s: contact_matrix_data[s] for s in fips}
def load_whitelist():
    """
    Load the whitelist result.

    Returns
    -------
    whitelist: pd.DataFrame
        DataFrame containing a whitelist of product features for counties.
    """
    # The whitelist artifact path is not state specific, but the path lookup
    # requires some fips, so any valid one works here.
    placeholder_fips = "06"
    artifact_path = get_run_artifact_path(
        fips=placeholder_fips, artifact=RunArtifact.WHITELIST_RESULT
    )
    return pd.read_json(artifact_path, dtype={"fips": str})
def cache_all_data():
    """
    Download all datasets locally.
    """
    # Fetch and persist each remote dataset to the local cache.
    cache_mobility_data()
    cache_public_implementations_data()
def get_compartment_value_on_date(fips, compartment, date, ensemble_results=None):
    """
    Return the value of compartment at a specified date.

    Parameters
    ----------
    fips: str
        State or County fips.
    compartment: str
        Name of the compartment to retrieve.
    date: datetime
        Date to retrieve values for.
    ensemble_results: NoneType or dict
        Pass in the pre-loaded simulation data to save time, else load it.

    Returns
    -------
    value: float
        Value of compartment on a given date.
    """
    if ensemble_results is None:
        ensemble_results = load_ensemble_results(fips)
    # Imported locally to avoid a circular import with the inference package.
    from pyseir.inference.fit_results import load_inference_result
    # Index into the median (ci_50) trajectory of the inferred-suppression run,
    # offset by whole days since the simulation start date.
    simulation_start_date = datetime.fromisoformat(load_inference_result(fips)["t0_date"])
    date_idx = int((date - simulation_start_date).days)
    return ensemble_results["suppression_policy__inferred"][compartment]["ci_50"][date_idx]
# Script entry point: pre-download all datasets into the local cache.
if __name__ == "__main__":
    cache_all_data()
|
# Copyright (C) 2016 Kevin Ross
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class ModifiesBootConfig(Signature):
    """Flags bcdedit invocations that change boot configuration settings."""

    name = "modifies_boot_config"
    description = "Modifies boot configuration settings"
    severity = 3
    categories = ["persistance", "ransomware"]
    authors = ["Kevin Ross"]
    minimum = "2.0"
    ttp = ["T1067"]
    # Only inspect process-creation / shell-execution API calls.
    filter_apinames = ("ShellExecuteExW", "CreateProcessInternalW")

    def on_call(self, call, process):
        # CreateProcessInternalW exposes the full command line; ShellExecuteExW
        # only exposes the target file path.
        arg_name = "command_line" if call["api"] == "CreateProcessInternalW" else "filepath"
        buf = call["arguments"][arg_name].lower()
        if "bcdedit" in buf and "set" in buf:
            self.mark_ioc("command", buf)

    def on_complete(self):
        # Signature fires if any matching command was marked.
        return self.has_marks()
|
"""Unit test package for disambigufile."""
|
"""
Tests to visually inspect the results of the library's functionality.
Run checks via
python check_visually.py
"""
from __future__ import print_function, division
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
import numpy as np
from scipy import ndimage, misc
from skimage import data
import argparse
def main():
    """Render each augmenter's output grid for visual inspection.

    With --only NAME, only the augmenter with that name is shown.
    """
    parser = argparse.ArgumentParser(description="Check augmenters visually.")
    parser.add_argument('--only', default=None, help="If this is set, then only the results of an augmenter with this name will be shown.", required=False)
    args = parser.parse_args()

    # Two 128x128 test images: a crop of the quokka photo and skimage's astronaut.
    images = [
        misc.imresize(ndimage.imread("../quokka.jpg")[0:643, 0:643], (128, 128)),
        misc.imresize(data.astronaut(), (128, 128))
    ]

    # One configured instance of every augmenter under test, each named so it
    # can be selected via --only.
    augmenters = [
        iaa.Noop(name="Noop"),
        iaa.OneOf(children=[
            iaa.CoarseDropout(p=0.5, size_percent=0.05),
            iaa.AdditiveGaussianNoise(scale=0.1*255),
            iaa.Crop(percent=0.1)
        ], name="OneOf"),
        iaa.Crop(px=(0, 8), name="Crop-px"),
        iaa.Crop(percent=(0, 0.1), name="Crop-percent"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Superpixels(p_replace=0.75, n_segments=50, name="Superpixels"),
        iaa.Grayscale(0.5, name="Grayscale0.5"),
        iaa.Grayscale(1.0, name="Grayscale1.0"),
        iaa.AverageBlur(k=(3, 11), name="AverageBlur"),
        iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
        iaa.MedianBlur(k=(3, 11), name="MedianBlur"),
        iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0, 2.0), name="Sharpen"),
        iaa.Emboss(alpha=(0.1, 1.0), strength=(0, 2.0), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.1, 1.0), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0, 1.0), name="DirectedEdgeDetect"),
        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1*255), name="AdditiveGaussianNoise"),
        iaa.Dropout((0.0, 0.1), name="Dropout"),
        iaa.CoarseDropout(p=0.05, size_percent=(0.05, 0.5), name="CoarseDropout"),
        iaa.Invert(p=0.5, name="Invert"),
        iaa.Invert(p=0.5, per_channel=True, name="InvertPerChannel"),
        iaa.Add((-50, 50), name="Add"),
        iaa.Add((-50, 50), per_channel=True, name="AddPerChannel"),
        iaa.AddElementwise((-50, 50), name="AddElementwise"),
        iaa.Multiply((0.5, 1.5), name="Multiply"),
        iaa.Multiply((0.5, 1.5), per_channel=True, name="MultiplyPerChannel"),
        iaa.MultiplyElementwise((0.5, 1.5), name="MultiplyElementwise"),
        iaa.ContrastNormalization(alpha=(0.5, 2.0), name="ContrastNormalization"),
        iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
            translate_px={"x": (-16, 16), "y": (-16, 16)},
            rotate=(-45, 45),
            shear=(-16, 16),
            order=ia.ALL,
            cval=(0, 255),
            mode=ia.ALL,
            name="Affine"
        ),
        iaa.PiecewiseAffine(scale=0.03, nb_rows=(2, 6), nb_cols=(2, 6), name="PiecewiseAffine"),
        iaa.ElasticTransformation(alpha=(0.5, 8.0), sigma=1.0, name="ElasticTransformation")
    ]

    # Meta-augmenters built from copies of everything above: one Sequential
    # applying each with p=0.2, and one Sometimes wrapping them all at p=0.5.
    augmenters.append(iaa.Sequential([iaa.Sometimes(0.2, aug.copy()) for aug in augmenters], name="Sequential"))
    augmenters.append(iaa.Sometimes(0.5, [aug.copy() for aug in augmenters], name="Sometimes"))

    for augmenter in augmenters:
        if args.only is None or augmenter.name == args.only:
            print("Augmenter: %s" % (augmenter.name,))
            grid = augmenter.draw_grid(images, rows=1, cols=16)
            misc.imshow(grid)
# Run the visual checks when executed directly.
if __name__ == "__main__":
    main()
|
import torch
import torch.nn as nn
class PGD:
def __init__(self, eps=60 / 255., step_size=20 / 255., max_iter=10, random_init=True,
targeted=False, loss_fn=nn.CrossEntropyLoss(), batch_size=64):
self.eps = eps
self.step_size = step_size
self.max_iter = max_iter
self.random_init = random_init
self.targeted = targeted
self.loss_fn = loss_fn
self.batch_size = batch_size
def attack(self, model, x, y, x_adv=None, targets=None):
if x_adv is None:
if self.random_init:
x_adv = 2 * self.eps * (torch.rand_like(x) - 0.5) + x
x_adv = x_adv.clamp(0.0, 1.0)
else:
x_adv = torch.clone(x).detach()
x_adv.requires_grad_(True)
pred_adv = model(x_adv)
if isinstance(pred_adv, (list, tuple)):
pred_adv = pred_adv[-1]
if self.targeted:
assert targets is not None, "Target labels not found!"
loss = self.loss_fn(pred_adv, targets)
else:
loss = self.loss_fn(pred_adv, y)
grad = torch.autograd.grad(loss, x_adv)[0]
pert = self.step_size * grad.sign()
x_adv = (x_adv + pert).clamp(0.0, 1.0).detach()
pert = (x_adv - x).clamp(-self.eps, self.eps)
return x + pert
def generate(self, model, x, y=None, targets=None, device=torch.device("cpu")):
model.to(device)
model.eval()
x_adv = []
for i in range(0, x.size(0), self.batch_size):
x_batch = x[i: i + self.batch_size].to(device)
if y is None:
y_batch = model(x_batch)
if isinstance(y_batch, tuple):
y_batch = y_batch[-1]
y_batch = y_batch.max(dim=-1)[1].to(device)
else:
y_batch = y[i: i + self.batch_size].to(device)
for j in range(self.max_iter):
if j == 0:
x_adv_batch = self.attack(model, x_batch, y_batch, targets=targets)
else:
x_adv_batch = self.attack(model, x_batch, y_batch, x_adv_batch, targets=targets)
x_adv.append(x_adv_batch)
return torch.cat(x_adv, dim=0).cpu()
|
from support import *
import numpy as np
import shap
# Fix the RNG seed so sampled backgrounds and data splits are reproducible.
np.random.seed(1)
def combined(feature_perturbation, twin=False):
    """Plot the SHAP dependence of predicted weight on height for a random forest.

    feature_perturbation selects TreeExplainer's mode ('interventional' vs
    'tree_path_dependent'); with twin=True, the StratPD partial dependence is
    overlaid on a twin y-axis. Saves the figure as a PDF and shows it.
    """
    n = 2000
    shap_test_size = 2000
    X, y, df, eqn = toy_weight_data(n)
    X = df.drop('weight', axis=1)
    y = df['weight']
    rf = RandomForestRegressor(n_estimators=40, oob_score=True, n_jobs=-1)
    rf.fit(X,y)

    # Explainer mode determines the background data and the subplot label.
    if feature_perturbation=='interventional':
        explainer = shap.TreeExplainer(rf, data=shap.sample(X, 500), feature_perturbation='interventional')
        xlabel = "height\n(b)"
    else:
        explainer = shap.TreeExplainer(rf, feature_perturbation='tree_path_dependent')
        xlabel = "height\n(a)"
    shap_sample = X[:shap_test_size]
    shap_values = explainer.shap_values(shap_sample, check_additivity=False)

    GREY = '#444443'
    fig, ax = plt.subplots(1, 1, figsize=(3.8,3.2))
    shap.dependence_plot("height", shap_values, shap_sample,
                         interaction_index=None, ax=ax, dot_size=5,
                         show=False, alpha=1)

    ax.spines['left'].set_linewidth(.5)
    ax.spines['bottom'].set_linewidth(.5)
    ax.spines['right'].set_linewidth(.5)
    ax.spines['top'].set_linewidth(.5)
    ax.set_ylabel("Impact on weight\n(height SHAP)", fontsize=12)
    ax.set_xlabel(xlabel, fontsize=12)
    ax.tick_params(axis='both', which='major', labelsize=10)

    # Vertical guide at height 70 — presumably the max female height in the
    # toy data (see toy_weight_data) — TODO confirm.
    ax.plot([70,70], [-75,75], '--', lw=.6, color=GREY)
    ax.text(69.8,60, "Max female height", horizontalalignment='right',
            fontsize=9)

    leaf_xranges, leaf_slopes, slope_counts_at_x, dx, slope_at_x, pdpx, pdpy, ignored = \
        partial_dependence(X=X, y=y, colname='height')

    ax.set_ylim(-77,75)
    # ax.set_xlim(min(pdpx), max(pdpx))
    ax.set_xticks([60,65,70,75])
    ax.set_yticks([-75,-60,-40,-20,0,20,40,60,75])
    ax.set_title(f"SHAP {feature_perturbation}", fontsize=12)
    # ax.set_ylim(-40,70)

    print(min(pdpx), max(pdpx))
    print(min(pdpy), max(pdpy))
    # Overall slope of the StratPD curve, reported in the annotation below.
    rise = max(pdpy) - min(pdpy)
    run = max(pdpx) - min(pdpx)
    slope = rise/run
    print(slope)
    # ax.plot([min(pdpx),max(pdpyX['height'])], [0,]

    if twin:
        # Overlay the StratPD partial dependence on a secondary y-axis in
        # raw weight units.
        ax2 = ax.twinx()
        # ax2.set_xlim(min(pdpx), max(pdpx))
        ax2.set_ylim(min(pdpy)-5, max(pdpy)+5)
        ax2.set_xticks([60,65,70,75])
        ax2.set_yticks([0,20,40,60,80,100,120,140,150])
        ax2.set_ylabel("weight", fontsize=12)
        ax2.plot(pdpx, pdpy, '.', markersize=1, c='k')
        # ax2.text(65,25, f"StratPD slope = {slope:.1f}")
        ax2.annotate(f"StratPD (slope={slope:.1f})", (64.65,39), xytext=(66,18),
                     horizontalalignment='left',
                     arrowprops=dict(facecolor='black', width=.5, headwidth=5, headlength=5),
                     fontsize=9)

    plt.tight_layout()
    plt.savefig(f"../images/weight-shap-{feature_perturbation}.pdf", bbox_inches="tight", pad_inches=0)
    plt.show()
# weight()
# Render both explainer variants with the StratPD twin-axis overlay.
combined('tree_path_dependent', twin=True)
combined('interventional', twin=True)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
    """Fully-connected Q-network mapping a state to one value per action."""

    def __init__(self, state_size, action_size, seed, hidden_layers):
        """
        state_size (int): Dimension of each state
        action_size (int): Dimension of each action
        seed (int): Random seed
        hidden_layers (list of int): number of nodes for each hidden layer
        """
        super(QNetwork, self).__init__()
        # Seed before creating layers so weight initialization is reproducible.
        self.seed = torch.manual_seed(seed)
        # One Linear per consecutive size pair: state -> h0 -> h1 -> ...
        sizes = [state_size] + list(hidden_layers)
        self.hidden_layers = nn.ModuleList(
            [nn.Linear(n_in, n_out) for n_in, n_out in zip(sizes[:-1], sizes[1:])]
        )
        self.output = nn.Linear(hidden_layers[-1], action_size)

    def forward(self, state):
        """Map a batch of states to Q-values (one per action)."""
        x = state
        for layer in self.hidden_layers:
            x = F.leaky_relu(layer(x))
        return self.output(x)
|
import time
from collections import deque
from copy import deepcopy
import numpy as np
import torch
from gym import make
from torch import nn
from torch.distributions import Normal
from torch.optim import Adam
from HW03.agent import transform_state, Agent
N_STEP = 1  # n-step return horizon for compressed transitions
GAMMA = 0.9  # per-step discount factor
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
UPDATE_LENGTH = 10  # rollout length between optimizer updates
ENTROPY = 0.01  # entropy bonus coefficient in the actor loss
TARGET_UPDATE = 800  # update() calls between target-critic refreshes
EPOSIODE_LEN = 200  # max steps per episode (sic: typo of EPISODE_LEN, kept for compatibility)
class A2C:
    """Advantage Actor-Critic with short rollouts and a periodically
    refreshed target critic used for bootstrapped value targets."""

    def __init__(self, state_dim, action_dim):
        # Effective discount for one N_STEP-compressed transition.
        self.gamma = GAMMA ** N_STEP
        self.actor = Agent.generate_model().to(DEVICE)  # Torch model
        self.critic = nn.Sequential(nn.Linear(3, 256), nn.ReLU(), nn.Linear(256, 1)).to(DEVICE)  # Torch model
        self.actor_optimizer = Adam(self.actor.parameters(), lr=0.0001)
        self.critic_optimizer = Adam(self.critic.parameters(), lr=0.001)
        # Rollout buffers, flushed every UPDATE_LENGTH steps or at episode end.
        self.states = []
        self.actions = []
        self.next_states = []
        self.rewards = []
        self.log_probs = []
        self.entropies = []
        self.critic_values = []
        self.target_values = []
        self.update_steps = 0
        # Action distribution produced by the latest act() call; update()
        # reads log-prob/entropy from it, so act() must precede update().
        self.distribution = None
        # Frozen critic copy providing bootstrap targets.
        self.target = deepcopy(self.critic)

    def optimizer_step(self, log_probs, entropies, returns, critic_values):
        """One combined gradient step: policy + entropy losses for the actor,
        squared TD error for the critic."""
        self.actor_optimizer.zero_grad()
        self.critic_optimizer.zero_grad()
        # Advantage is detached so the policy loss does not backprop into the critic.
        adv = (returns - critic_values).detach()
        policy_loss = -(log_probs * adv).mean()
        entropy_loss = -entropies.mean() * ENTROPY
        value_loss = ((critic_values - returns) ** 2 / 2).mean()
        total_loss = value_loss + entropy_loss + policy_loss
        total_loss.backward()
        self.actor_optimizer.step()
        self.critic_optimizer.step()

    def update(self, transition):
        """Buffer one transition; run an optimizer step when the rollout is
        full (UPDATE_LENGTH) or the episode is done."""
        self.update_steps += 1
        if self.update_steps % TARGET_UPDATE == 0:
            self.target = deepcopy(self.critic)
        state, action, next_state, reward, done = transition
        action = torch.tensor(action)
        self.states.append(transform_state(state))
        self.actions.append(action)
        self.next_states.append(transform_state(next_state))
        # Rescale reward; presumably tuned for Pendulum's negative reward
        # range — TODO confirm the 8.1 offset against the environment.
        self.rewards.append((reward + 8.1) / 8.1)
        self.log_probs.append(self.distribution.log_prob(action))
        self.entropies.append(self.distribution.entropy())
        self.critic_values.append(self.critic(transform_state(state)))
        self.target_values.append(self.target(transform_state(state)))
        if done or len(self.states) == UPDATE_LENGTH:
            # Bootstrap value for the final state (zero when the episode ended).
            next_target = torch.zeros(1) if done else self.target(self.next_states[-1])
            if len(self.target_values) > 1:
                # Targets: r_t + gamma * V_target(s_{t+1}) — shift the stored
                # target values by one and append next_target for the last row.
                returns = torch.tensor(self.rewards).view(-1, 1) + \
                          self.gamma * torch.cat((torch.cat(self.target_values[1:]), next_target)).view(-1, 1).detach()
            else:
                # Single-entry rollout: skip the update, just drop the buffers.
                self.states = []
                self.actions = []
                self.next_states = []
                self.rewards = []
                self.log_probs = []
                self.entropies = []
                self.critic_values = []
                self.target_values = []
                return
            self.optimizer_step(
                torch.cat(self.log_probs).view(-1, 1),
                torch.cat(self.entropies).view(-1, 1),
                returns,
                torch.cat(self.critic_values).view(-1, 1)
            )
            # Clear rollout buffers for the next window.
            self.states = []
            self.actions = []
            self.next_states = []
            self.rewards = []
            self.log_probs = []
            self.entropies = []
            self.critic_values = []
            self.target_values = []

    def act(self, state):
        # Remember: agent is not deterministic, sample actions from distribution (e.g. Gaussian)
        state = transform_state(state)
        out = self.actor(state)
        # Actor outputs (mean, std) of a Gaussian policy.
        self.distribution = Normal(out[0], out[1])
        return np.array([self.distribution.sample().item()])

    def save(self, i):
        """Persist the actor weights to agent_{i}.pkl."""
        torch.save(self.actor.state_dict(), f'agent_{i}.pkl')
# Training loop: run A2C on Pendulum-v0, periodically reporting the mean
# score over the last 75 episodes and checkpointing the best actor.
if __name__ == "__main__":
    env = make("Pendulum-v0")
    a2c = A2C(state_dim=3, action_dim=1)
    episodes = 10000
    scores = []
    best_score = -10000.0
    best_score_25 = -10000.0
    total_steps = 0
    start = time.time()
    for i in range(episodes):
        state = env.reset()
        total_reward = 0
        steps = 0
        done = False
        # Sliding buffers for assembling n-step (N_STEP) transitions.
        reward_buffer = deque(maxlen=N_STEP)
        state_buffer = deque(maxlen=N_STEP)
        action_buffer = deque(maxlen=N_STEP)
        while not done:
            if steps == EPOSIODE_LEN:
                break
            total_steps += 1
            action = a2c.act(state)
            next_state, reward, done, _ = env.step(action)
            next_state = next_state
            total_reward += reward
            steps += 1
            reward_buffer.append(reward)
            state_buffer.append(state)
            action_buffer.append(action)
            # Once the buffer is full, feed the oldest state/action with the
            # discounted sum of buffered rewards as an n-step transition.
            # NOTE(review): the comprehension's `i` shadows the episode index;
            # harmless (comprehension scope) but fragile.
            if len(reward_buffer) == N_STEP:
                a2c.update((state_buffer[0], action_buffer[0], next_state, sum([(GAMMA ** i) * r for i, r in enumerate(reward_buffer)]), done))
            state = next_state
            #env.render()
        scores.append(total_reward)
        # Flush the remaining partial n-step transitions at episode end.
        if len(reward_buffer) == N_STEP:
            rb = list(reward_buffer)
            for k in range(1, N_STEP):
                a2c.update((state_buffer[k], action_buffer[k], next_state, sum([(GAMMA ** i) * r for i, r in enumerate(rb[k:])]), done))
        if (i + 1) % 75 == 0:
            current_score = np.mean(scores)
            print(f'Current score: {current_score}')
            scores = []
            if current_score > best_score:
                best_score = current_score
                a2c.save(75)
                print(f'Best model saved with score: {best_score}')
            end = time.time()
            elapsed = end - start
            start = end
            print(f'Elapsed time: {elapsed}')
        #elif (i + 1) % 25 == 0:
        #    current_score_25 = np.mean(scores[-25:])
        #    print(f'Intermediate score: {current_score_25}')
        #    if current_score_25 > best_score_25:
        #        best_score_25 = current_score_25
        #        a2c.save(25)
        #        print(f'Best 25 model saved with score: {best_score_25}')
|
from datetime import datetime
from uuid import uuid4
from django.contrib.auth import authenticate
from rest_framework.exceptions import NotFound
from api.exceptions import WithdrawalUser, AlreadyLogout
from apps.users.models import User, UserSession
class UserService(object):
    """Service-layer operations for user accounts: login, logout, withdrawal."""

    def check_username(self, username: str) -> bool:
        """Return True if an active (not withdrawn) user holds this username."""
        return User.objects.filter(username=username, withdrawal__isnull=True).exists()

    def login(self, user_data: dict, ip: str) -> dict:
        """Authenticate and open a session.

        Raises NotFound if the user does not exist (or the password is wrong),
        WithdrawalUser if the user exists but has withdrawn.
        """
        # Login is refused when the user does not exist, or exists but has withdrawn.
        user = authenticate(username=user_data.get('username'), password=user_data.get('password'))
        if user is None:
            raise NotFound
        elif user.withdrawal:
            raise WithdrawalUser
        # Create the UserSession for this login.
        user_session = UserSession.objects.create(
            user_id=user.id,
            ip=ip,
            uuid=uuid4().__str__()
        )
        data = {
            'id': user_session.id,
            'user_id': user_session.user_id,
            'ip': user_session.ip,
            'uuid': user_session.uuid,
        }
        return data

    def logout(self, data: dict):
        """Close the session identified by data['uuid'] and stamp last_logout
        on both the session and its user.

        Raises NotFound for an unknown uuid, AlreadyLogout if the session was
        already closed.
        """
        try:
            user_session = UserSession.objects.prefetch_related('user').get(uuid=data.get('uuid'))
        except UserSession.DoesNotExist:
            raise NotFound
        if user_session.last_logout:
            raise AlreadyLogout
        now = datetime.now()
        user_session.last_logout = now
        user_session.save(update_fields=['last_logout'])
        user_session.user.last_logout = now
        user_session.user.save(update_fields=['last_logout'])
        return data

    def withdraw(self, data: dict):
        """Withdraw the account behind the session identified by data['uuid'].

        Raises NotFound for an unknown uuid, WithdrawalUser if already withdrawn.
        """
        try:
            user_session = UserSession.objects.prefetch_related('user').get(uuid=data.get('uuid'))
        except UserSession.DoesNotExist:
            raise NotFound
        if user_session.user.withdrawal:
            raise WithdrawalUser
        # Mark withdrawal and update last_logout.
        # BUG FIX: a mojibake fragment of this comment previously sat on its
        # own line as a bare (Hangul) identifier, raising NameError at runtime.
        now = datetime.now()
        user_session.last_logout = now
        user_session.save(update_fields=['last_logout'])
        # username was intended to be reusable (unique=False), but duplicate
        # usernames created after a withdrawal caused conflicts. So on
        # withdrawal we set username=None and archive the old name in
        # withdrawal_username (withdrawn user: withdrawal + withdrawal_username
        # set, username=None).
        user_session.user.last_logout = now
        user_session.user.withdrawal = now
        user_session.user.withdrawal_username = user_session.user.username
        user_session.user.username = None
        user_session.user.save(update_fields=['withdrawal', 'withdrawal_username', 'last_logout', 'username'])
        data = {
            'id': user_session.user.id,
            'created': user_session.user.created,
            'withdrawal': user_session.user.withdrawal,
            'withdrawal_username': user_session.user.withdrawal_username,
        }
        return data
|
import numpy as np
def Mahalanobis_distance(x, mu, M):
    """Compute the Mahalanobis distance MD(x, mu) in a space with metric M.

    ------------PARAMETERS------------
    @param x  : First vector
    @param mu : Second vector, usually containing the expected values
    @param M  : The inverse of the covariance matrix
    @returns  : The Mahalanobis distance between x and mu, MD(x, mu)
    ----------------------------------
    """
    diff = np.subtract(x, mu)
    # MD(x, mu) = (diff^T . M . diff)^{1/2}
    quad_form = np.dot(diff, np.dot(M, diff))
    return np.sqrt(quad_form)
def multivariate_hypergeometric_Mahalanobis_distance(x, mu, m):
    """Mahalanobis distance MD(x, mu) in the metric space defined by a
    multivariate hypergeometric distribution with category sizes m.

    ------------PARAMETERS------------
    @param x  : First vector (observed counts per category)
    @param mu : Second vector, usually containing the expected values
    @param m  : Vector containing the number of items in each category
    @returns  : The MD(x, mu) and a vector containing the contributions
                to the square of the MD(x, mu)
    ----------------------------------
    """
    # Accept any sequence type (the original loop required numpy arrays,
    # since `(x - mu)` fails on plain lists).
    x = np.asarray(x, dtype=float)
    mu = np.asarray(mu, dtype=float)
    m = np.asarray(m, dtype=float)

    N = m.sum()  # total number of items across categories
    n = x.sum()  # number of items drawn
    # Scale factor of the hypergeometric covariance: n(N-n) / ((N-1) N^2).
    gamma = n * (N - n) / (N - 1) / N / N
    # Per-category contribution to the squared distance, before scaling.
    contrib = (x - mu) ** 2 / N / m
    return np.sqrt(contrib.sum() / gamma), contrib / gamma
|
import random
'''
def get_trade(r_sold, q_sold, i_buying, agent_id):
return {
'r_sold': r_sold,
'q_sold': q_sold,
'i_buying': i_buying,
'agent_id': agent_id,
'action_id': 'Trade'
}
def get_arb_market(settle_asset, agent_id):
return {
'settle_asset': settle_asset,
'agent_id': agent_id,
'action_id': 'ArbMarket'
}
def get_liq_add(r_deposit, agent_id):
return {
'r_deposit': r_deposit,
'agent_id': agent_id,
'action_id': 'AddLiquidity'
}
def get_liq_rem(s_burn, agent_id):
return {
's_burn': s_burn,
'agent_id': agent_id,
'action_id': 'RemoveLiquidity'
}
def get_liq_rem_percentage(r_percent, agent_id):
return {
'r_percent': r_percent,
'agent_id': agent_id,
'action_id': 'RemoveLiquidityPercent'
}
'''
def get_action_list(init_list, action_dict, seed=42):
    """Expand (action, count) pairs into a flat list of actions.

    Entries whose action name appears in *action_dict* are replaced by a
    weighted random draw over that dict's keys.  Seeding the RNG here
    standardizes the agent actions across different tests.
    """
    random.seed(seed)
    actions = []
    for name, count in init_list:
        if name in action_dict:
            weighted = action_dict[name]
            actions += random.choices(list(weighted),
                                      weights=list(weighted.values()),
                                      k=count)
        else:
            actions += [name] * count
    return actions
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Evaluation for FasterRcnn"""
import os
import argparse
import time
import numpy as np
from pycocotools.coco import COCO
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed, Parameter
from src.FasterRcnn.faster_rcnn_r50 import Faster_Rcnn_Resnet50
from src.config import config
from src.dataset import data_to_mindrecord_byte_image, create_fasterrcnn_dataset
from src.util import coco_eval, bbox2result_1image, results2json
# Fixed seed for reproducible evaluation runs.
set_seed(1)

parser = argparse.ArgumentParser(description="FasterRcnn evaluation")
parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.")
parser.add_argument("--ann_file", type=str, default="val.json", help="Ann file, default is val.json.")
parser.add_argument("--checkpoint_path", type=str, required=True, help="Checkpoint file path.")
parser.add_argument("--device_target", type=str, default="Ascend",
                    help="device where the code will be implemented, default is Ascend")
parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
args_opt = parser.parse_args()

# Graph mode on the requested device; this script evaluates on a single device.
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args_opt.device_id)
def fasterrcnn_eval(dataset_path, ckpt_path, ann_file):
    """FasterRcnn evaluation.

    Runs Faster-RCNN ResNet50 over the MindRecord dataset at *dataset_path*
    using the checkpoint at *ckpt_path*, then scores the detections against
    the COCO annotations in *ann_file* via coco_eval.
    """
    ds = create_fasterrcnn_dataset(dataset_path, batch_size=config.test_batch_size, is_training=False)
    net = Faster_Rcnn_Resnet50(config)
    param_dict = load_checkpoint(ckpt_path)
    if args_opt.device_target == "GPU":
        # Cast every checkpoint tensor to float32 for GPU inference
        # (presumably the checkpoint may hold float16 -- TODO confirm).
        for key, value in param_dict.items():
            tensor = value.asnumpy().astype(np.float32)
            param_dict[key] = Parameter(tensor, key)
    load_param_into_net(net, param_dict)
    net.set_train(False)

    eval_iter = 0
    total = ds.get_dataset_size()
    outputs = []
    dataset_coco = COCO(ann_file)

    print("\n========================================\n")
    print("total images num: ", total)
    print("Processing, please wait a moment.")
    max_num = 128  # keep at most 128 detections per image after sorting
    for data in ds.create_dict_iterator(num_epochs=1):
        eval_iter = eval_iter + 1

        img_data = data['image']
        img_metas = data['image_shape']
        gt_bboxes = data['box']
        gt_labels = data['label']
        gt_num = data['valid_num']

        start = time.time()
        # run net
        output = net(img_data, img_metas, gt_bboxes, gt_labels, gt_num)
        end = time.time()
        print("Iter {} cost time {}".format(eval_iter, end - start))

        # output: per-batch boxes, labels, and a boolean validity mask
        all_bbox = output[0]
        all_label = output[1]
        all_mask = output[2]

        for j in range(config.test_batch_size):
            all_bbox_squee = np.squeeze(all_bbox.asnumpy()[j, :, :])
            all_label_squee = np.squeeze(all_label.asnumpy()[j, :, :])
            all_mask_squee = np.squeeze(all_mask.asnumpy()[j, :, :])

            # Keep only the detections flagged valid by the mask.
            all_bboxes_tmp_mask = all_bbox_squee[all_mask_squee, :]
            all_labels_tmp_mask = all_label_squee[all_mask_squee]

            if all_bboxes_tmp_mask.shape[0] > max_num:
                # Sort by score (last bbox column) descending, then truncate.
                inds = np.argsort(-all_bboxes_tmp_mask[:, -1])
                inds = inds[:max_num]
                all_bboxes_tmp_mask = all_bboxes_tmp_mask[inds]
                all_labels_tmp_mask = all_labels_tmp_mask[inds]

            outputs_tmp = bbox2result_1image(all_bboxes_tmp_mask, all_labels_tmp_mask, config.num_classes)
            outputs.append(outputs_tmp)

    eval_types = ["bbox"]
    result_files = results2json(dataset_coco, outputs, "./results.pkl")
    coco_eval(result_files, eval_types, dataset_coco, single_result=True)
if __name__ == '__main__':
    # Ensure the evaluation MindRecord exists, creating it from the raw
    # dataset (COCO or a generic IMAGE_DIR/ANNO_PATH layout) if necessary.
    prefix = "FasterRcnn_eval.mindrecord"
    mindrecord_dir = config.mindrecord_dir
    mindrecord_file = os.path.join(mindrecord_dir, prefix)
    print("CHECKING MINDRECORD FILES ...")
    if not os.path.exists(mindrecord_file):
        if not os.path.isdir(mindrecord_dir):
            os.makedirs(mindrecord_dir)
        if args_opt.dataset == "coco":
            if os.path.isdir(config.coco_root):
                print("Create Mindrecord. It may take some time.")
                data_to_mindrecord_byte_image("coco", False, prefix, file_num=1)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("coco_root not exits.")
        else:
            if os.path.isdir(config.IMAGE_DIR) and os.path.exists(config.ANNO_PATH):
                print("Create Mindrecord. It may take some time.")
                data_to_mindrecord_byte_image("other", False, prefix, file_num=1)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("IMAGE_DIR or ANNO_PATH not exits.")
    print("CHECKING MINDRECORD FILES DONE!")

    print("Start Eval!")
    fasterrcnn_eval(mindrecord_file, args_opt.checkpoint_path, args_opt.ann_file)
|
import os
import json
import logging
from ..core.logger import setup_logger
from ..benchmarks.utils import run_benchmarks
from ..core.utils import load_class
LOGGER = logging.getLogger()
def run_evaluation(config):
    """Train (or load) the configured feature extractor and run benchmarks.

    Side effects: creates config['benchmark_tmp_root'], writes a log file
    there, and logs the final benchmark scores.
    """
    logger_path = os.path.join(config['benchmark_tmp_root'], 'logger')
    os.makedirs(config['benchmark_tmp_root'], exist_ok=True)
    setup_logger(out_file=logger_path, stderr_level=logging.DEBUG)
    LOGGER.info(config)

    feature_extractor = load_class(config["feature_extractor_class"])(
        **config["feature_extractor_class_kwargs"])

    # BUG FIX: json.load(open(...)) leaked the file handle; close it
    # deterministically with a context manager.
    with open(os.path.join(config['dataset_path'], "splits.json"), "r") as fh:
        splits = json.load(fh)

    dataset_cls = load_class(config["feature_extractor_dataset_class"])
    train_dataset = dataset_cls(data_path=config['dataset_path'],
                                airports=splits['train'],
                                **config["feature_extractor_train_dataset_kwargs"])
    # NOTE(review): the val dataset reuses the *train* kwargs key, as in the
    # original -- confirm there is no separate val-kwargs config entry.
    val_dataset = dataset_cls(data_path=config['dataset_path'],
                              airports=splits['val'],
                              **config["feature_extractor_train_dataset_kwargs"])

    try:
        if config['train_extractor']:
            feature_extractor.train(train_dataset, val_dataset)
        else:
            feature_extractor.load_weights()
    except NotImplementedError:
        # Extractors without trainable weights simply skip this step.
        pass

    scores = run_benchmarks(feature_extractor, config)
    LOGGER.info("Final Benchmark scores:\n" + str(scores))
|
"""
Parse spotify URLs
"""
from __future__ import unicode_literals, print_function, division
import re
import logging
log = logging.getLogger('spotify')
def handle_privmsg(bot, user, channel, args):
    """Grab Spotify URLs from the messages and handle them.

    Returns None when the message has no resolvable Spotify link (or the
    API call fails); otherwise returns the result of bot.say(...).
    """
    # Raw string + escaped dots: the original non-raw pattern relied on
    # deprecated escape handling and matched any character at the dots.
    m = re.match(
        r".*(https?:\/\/open\.spotify\.com\/|spotify:)"
        r"(?P<item>album|artist|track|user[:\/]\S+[:\/]playlist)"
        r"[:\/](?P<id>[a-zA-Z0-9]+)\/?.*", args)
    if not m:
        return None

    spotify_id = m.group('id')
    item = m.group('item').replace(':', '/').split('/')
    item[0] += 's'  # API endpoints are plural: albums/artists/tracks/users
    if item[0] == 'users':
        # All playlists seem to return 401 at the time, even the public ones
        return None

    apiurl = "https://api.spotify.com/v1/%s/%s" % ('/'.join(item), spotify_id)
    r = bot.get_url(apiurl)
    if r.status_code != 200:
        if r.status_code not in [401, 403]:
            # BUG FIX: the original applied % to a single value against a
            # two-slot format string (TypeError) and passed apiurl as
            # warning()'s second positional arg.  Use lazy logging args.
            log.warning('Spotify API returned %s while trying to fetch %s',
                        r.status_code, apiurl)
        return

    data = r.json()
    title = '[Spotify] '

    if item[0] in ['albums', 'tracks']:
        artists = [artist['name'] for artist in data['artists']]
        title += ', '.join(artists)

    if item[0] == 'albums':
        title += ' - %s (%s)' % (data['name'], data['release_date'])

    if item[0] == 'artists':
        title += data['name']
        genres_n = len(data['genres'])
        if genres_n > 0:
            genitive = 's' if genres_n > 1 else ''
            genres = data['genres'][0:4]
            # BUG FIX: precedence made the original evaluate
            # ('%s more' % genres_n) - 5, a str-int TypeError; and since only
            # 4 genres are shown, the remainder is genres_n - 4, not - 5.
            more = ' +%s more' % (genres_n - 4) if genres_n > 4 else ''
            title += ' (Genre%s: %s%s)' % (genitive, ', '.join(genres), more)

    if item[0] == 'tracks':
        title += ' - %s - %s' % (data['album']['name'], data['name'])

    return bot.say(channel, title)
|
from io import BytesIO
from zipfile import ZipFile
import requests
import os
from utilities.get_or_create_temporary_directory import get_temporary_directory as get_temp
def get_file_from_server(url, return_directory, **kwargs):
    """Retrieve a file (zipped shapefile, CSV or JSON) from *url*.

    :param url: URL of the resource (e.g. a WFS GetFeature request)
    :param return_directory: directory the payload is written/extracted to
    :param kwargs: optional ``filename`` override for non-zip payloads
    :return: (return_directory, list of zip members) for zip payloads,
             (return_directory, filename) otherwise
    """
    valid_formats = {
        "CSV": "text/csv",
        "SHAPE-ZIP": "application/zip",
        "JSON": "application/json"
    }
    try:
        response = requests.get(url)
        if not (200 <= response.status_code <= 299):
            raise ValueError(f"Bad status code: {response.status_code}")
        if not response.headers["Content-Type"]:
            raise ValueError("Couldn't figure out what type this is, sorry.")

        # Headers look like "application/zip; charset=utf-8": split each
        # ";"-separated part into ["key", "value"] (or a bare ["mime/type"]).
        content_type = [item.strip().split("=") for item in
                        response.headers["Content-Type"].split(";")]
        if content_type[0][0] not in valid_formats.values():
            raise ValueError(f"Looks like an invalid content type: {response.headers['Content-Type']}")

        if content_type[0][0] == "application/zip":
            my_zipfile = ZipFile(BytesIO(response.content))
            my_zipfile.extractall(path=return_directory)
            return return_directory, my_zipfile.namelist()

        # BUG FIX: the original stashed header values via locals()[...] = ...,
        # which does not reliably create function locals in CPython and is
        # guaranteed not to from 3.13 on (PEP 667).  Use a real dict.
        meta = {}
        content_disposition = [item.strip().split("=") for item in
                               response.headers["Content-Disposition"].split(";")]
        for item in content_type + content_disposition:
            if len(item) == 2:
                meta[item[0]] = item[1]
        if "filename" in kwargs:
            meta["filename"] = kwargs["filename"]
        if not meta.get("filename"):
            raise ValueError("Got data but couldn't find a filename for it.")

        with open(os.path.join(return_directory, meta["filename"]),
                  mode="w", encoding=meta.get("charset", "utf-8")) as fh:
            fh.write(response.text)
        return return_directory, meta["filename"]
    except Exception as e:
        # Original behavior preserved: report and terminate on any failure.
        print(f"{e}")
        quit(1)
def main():
    """Interactively build a WFS GetFeature URL and fetch the dataset."""
    # Values used when the user just presses ENTER at a prompt.
    DEFAULT_FORMAT = {
        "geoserver": "https://markfoley.info/geoserver",
        "workspace": "census2011",
        "dataset": "counties",
        "output_format": "SHAPE-ZIP"
    }
    geoserver_target = {}
    geoserver_target["geoserver"] = \
        input(f"Input Geoserver URL or press ENTER for {DEFAULT_FORMAT['geoserver']} ") or DEFAULT_FORMAT[
            'geoserver']
    geoserver_target["workspace"] = \
        input(f"Input Workspace or press ENTER for {DEFAULT_FORMAT['workspace']} ") or DEFAULT_FORMAT['workspace']
    geoserver_target["dataset"] = \
        input(f"Input Data Set or press ENTER for {DEFAULT_FORMAT['dataset']} ") or DEFAULT_FORMAT['dataset']
    geoserver_target["output_format"] = \
        input(f"Output Format or press ENTER for {DEFAULT_FORMAT['output_format']} ") or DEFAULT_FORMAT['output_format']
    # Normalize case to match the server-side format names.
    geoserver_target["output_format"] = geoserver_target["output_format"].upper()

    my_temp_directory = get_temp(__file__)

    # Build a WFS 1.0.0 GetFeature request for the chosen layer.
    url = f"{geoserver_target['geoserver']}/{geoserver_target['workspace']}/ows?service=WFS&version=1.0.0&" \
          f"request=GetFeature&typeName={geoserver_target['workspace']}:{geoserver_target['dataset']}&" \
          f"outputFormat={geoserver_target['output_format']}"

    my_files = get_file_from_server(url, my_temp_directory)
    print(my_files)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base
import sqlalchemy_mate as sam
Base = declarative_base()
class User(Base, sam.ExtendedBase):
    """Minimal ORM model used to demo sqlalchemy_mate's pretty-table helpers."""
    __tablename__ = "users"

    id = sa.Column(sa.Integer, primary_key=True)
    # name is nullable on purpose so NULL rendering can be demonstrated
    name = sa.Column(sa.String, nullable=True)
# Core Table object behind the ORM model, used in the Table-based examples.
t_users = User.__table__

# In-memory SQLite keeps the demo self-contained.
engine = sam.EngineCreator().create_sqlite()
Base.metadata.create_all(engine)

# Bulk insert the sample rows (smart_insert handles batching/conflicts).
User.smart_insert(
    engine,
    [
        User(id=1, name="Alice"),
        User(id=2, name="Bob"),
        User(id=3, name="Cathy"),
        User(id=4, name="David"),
        User(id=5, name="Edward"),
        User(id=6, name="Frank"),
        User(id=7, name="George"),
    ]
)

# The following calls all render query results as pretty-printed tables,
# demonstrating every input type sam.pt.from_everything accepts.

# from ORM class
print(sam.pt.from_everything(User, engine))

# from Table
print(sam.pt.from_everything(t_users, engine, limit=3))

# from ORM styled select statement
print(sam.pt.from_everything(
    sa.select(User.name).where(User.id >= 4).limit(2),
    engine,
))

# from SQL expression styled select statement
print(sam.pt.from_everything(
    sa.select(t_users.c.name).where(User.id >= 4),
    engine
))

# from Raw SQL text
print(sam.pt.from_everything(
    "SELECT id FROM users WHERE name = 'Edward'",
    engine
))

# from list of dict
print(sam.pt.from_everything([
    {"id": 1, "name": "Alice"},
    {"id": 2, "name": "Bob"},
    {"id": 3, "name": "Cathy"},
]))
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
# VOC_CLASSES = ( '__background__', # always index 0
# Class names for this VOC-style detection dataset; tuple order defines the
# label ids (the usual '__background__' index 0 entry is commented out above).
VOC_CLASSES = (
    "bus",
    "car",
    "motorcycle",
    "truck"
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.