hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71ab63c599cebc6ea849c3b34a83ddb2a9b964d | 1,266 | py | Python | tests/test_experimental.py | daoluan/pandaSDMX | 2efcb5a429a5306efd89bed4cd55946d1ad5067b | [
"Apache-2.0"
] | null | null | null | tests/test_experimental.py | daoluan/pandaSDMX | 2efcb5a429a5306efd89bed4cd55946d1ad5067b | [
"Apache-2.0"
] | null | null | null | tests/test_experimental.py | daoluan/pandaSDMX | 2efcb5a429a5306efd89bed4cd55946d1ad5067b | [
"Apache-2.0"
] | null | null | null | """Tests for experimental code using pandas objects for internal storage.
See pandasdmx.experimental for more information.
"""
from pandasdmx.experimental import DataSet as PandasDataSet
from pandasdmx.model import (
AttributeValue,
DataAttribute,
DataSet,
Key,
Observation,
)
import pytest
pytestmark = pytest.mark.experimental
# Exercise both the standard DataSet class and the experimental
# pandas-backed implementation with the same scenario.
@pytest.mark.parametrize('DataSetType', [DataSet, PandasDataSet])
def test_add_obs(DataSetType):
    """Observations added via add_obs() must round-trip through .obs."""
    # A base key plus one shared attribute set used by every observation.
    key = Key(CURRENCY='NZD', CURRENCY_DENOM='EUR',
              TIME_PERIOD='2018-01-01')
    status = DataAttribute(id='OBS_STATUS')
    attrs = {'OBS_STATUS': AttributeValue(value_for=status, value='A')}

    observations = []
    for day, value in enumerate([5, 6, 7]):
        # Derive a per-day key from the previous one.
        key = key.copy(TIME_PERIOD='2018-01-{:02d}'.format(day))
        observations.append(
            Observation(dimension=key, value=value, attached_attribute=attrs))

    dataset = DataSetType()
    dataset.add_obs(observations)

    # PandasDataSet does not store Observation objects internally, but should
    # emit equal ones when the .obs property is accessed.
    assert all(a == b for a, b in zip(dataset.obs, observations))
| 31.65 | 77 | 0.690363 | from pandasdmx.experimental import DataSet as PandasDataSet
from pandasdmx.model import (
AttributeValue,
DataAttribute,
DataSet,
Key,
Observation,
)
import pytest
pytestmark = pytest.mark.experimental
@pytest.mark.parametrize('DataSetType', [DataSet, PandasDataSet])
def test_add_obs(DataSetType):
key = Key(CURRENCY='NZD', CURRENCY_DENOM='EUR',
TIME_PERIOD='2018-01-01')
obs_status = DataAttribute(id='OBS_STATUS')
attr = {'OBS_STATUS': AttributeValue(value_for=obs_status, value='A')}
obs = []
for day, value in enumerate([5, 6, 7]):
key = key.copy(TIME_PERIOD='2018-01-{:02d}'.format(day))
obs.append(Observation(dimension=key, value=value,
attached_attribute=attr))
ds = DataSetType()
ds.add_obs(obs)
assert all(a == b for a, b in zip(ds.obs, obs))
| true | true |
f71ab6ca83c0cccdc98f7bc0e6a9815f90dc10b0 | 4,878 | py | Python | bamboo/unit_tests/test_unit_layer_gather.py | steffi7574/lbann | 6a6b86d3cbcf4ca50730c652a5014f7cb3afa5e6 | [
"Apache-2.0"
] | null | null | null | bamboo/unit_tests/test_unit_layer_gather.py | steffi7574/lbann | 6a6b86d3cbcf4ca50730c652a5014f7cb3afa5e6 | [
"Apache-2.0"
] | 5 | 2021-07-15T20:51:21.000Z | 2022-01-01T03:18:05.000Z | bamboo/unit_tests/test_unit_layer_gather.py | ekmixon/lbann | 665797a112dc96d15bd1d958de61f48bf5d3d21f | [
"Apache-2.0"
] | null | null | null | import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
# Synthetic-data dimensions and the base RNG seed.
input_size = 23
output_size = 15
seed = 202101280

# Sample access functions
def get_sample(index):
    """Return one sample: `input_size` values then `output_size` indices.

    Seeding with ``seed + index`` makes each sample deterministic, and the
    draw order (normals first, uniforms second) fixes the exact values.
    """
    np.random.seed(seed + index)
    sample = [np.random.normal() for _ in range(input_size)]
    for _ in range(output_size):
        # Indices deliberately spill past [0, input_size) to exercise
        # out-of-range gather behavior.
        sample.append(np.random.uniform(-1, input_size + 1))
    return sample
def num_samples():
    """Return the total number of samples in the synthetic data set."""
    return 25
def sample_dims():
    """Return the flat dimensions of one sample (values + indices)."""
    return (input_size+output_size,)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
    """Construct LBANN experiment.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns a (trainer, model, data_reader, optimizer) tuple.
    """
    # Half the data set per mini-batch.
    batch_size = num_samples() // 2
    return (
        lbann.Trainer(batch_size),
        construct_model(lbann),
        construct_data_reader(lbann),
        lbann.NoOptimizer(),
    )
def construct_model(lbann):
    """Construct LBANN model.

    Builds a small graph that gathers values by (possibly out-of-range)
    indices, computes a weighted L2 norm, and attaches callbacks that
    compare the metric against a NumPy reference value and run gradient
    checking.

    Args:
        lbann (module): Module for LBANN Python frontend
    """
    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x = lbann.Identity(lbann.Input())
    # Split the flat sample into values (x0) and gather indices (x1).
    x_slice = lbann.Slice(
        x,
        slice_points=tools.str_list([0,input_size,input_size+output_size]),
    )
    x0_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ConstantInitializer(value=0.0),
        name='input_weights',
    )
    x0 = lbann.Sum(
        lbann.Identity(x_slice),
        lbann.WeightsLayer(weights=x0_weights, dims=tools.str_list(input_size)),
    )
    x1 = lbann.Identity(x_slice)
    # Apply gather
    y0 = lbann.Gather(x0, x1)
    # Per-position weights 1..output_size for the squared-error sum.
    y1 = lbann.Concatenation([
        lbann.Constant(value=i+1, num_neurons='1')
        for i in range(output_size)
    ])
    y = lbann.Multiply(y0, y1)
    z = lbann.L2Norm2(y)
    # Objects for LBANN model
    layers = list(lbann.traverse_layer_graph(x))
    metric = lbann.Metric(z, name='obj')
    obj = lbann.ObjectiveFunction(z)
    callbacks = []
    # Compute expected metric value with NumPy, mirroring the layer graph.
    # NOTE: x, x0, x1, y0, z and the loop index i are intentionally reused
    # here for the reference computation; the layer handles above were
    # already captured in `layers`, `metric` and `obj`.
    vals = []
    for i in range(num_samples()):
        x = get_sample(i)
        x0 = x[:input_size]
        x1 = x[input_size:]
        y0 = np.zeros(output_size)
        for i in range(output_size):
            # Out-of-range indices leave the gathered value at zero.
            if 0 <= x1[i] < input_size:
                y0[i] = x0[int(x1[i])]
        z = 0
        for i in range(output_size):
            z += ((i+1)*y0[i]) ** 2
        vals.append(z)
    val = np.mean(vals)
    # Tolerance scaled to float32 machine epsilon.
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metric.name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))
    # Gradient checking
    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
    # Construct model; no training epochs are needed for this check.
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=layers,
                       objective_function=obj,
                       metrics=[metric],
                       callbacks=callbacks)
def construct_data_reader(lbann):
    """Construct Protobuf message for Python data reader.

    The Python data reader will import the current Python file to
    access the sample access functions.

    Args:
        lbann (module): Module for LBANN Python frontend

    """
    # Note: The training data reader should be removed when
    # https://github.com/LLNL/lbann/issues/1098 is resolved.
    message = lbann.reader_pb2.DataReader()
    for role in ('train', 'test'):
        message.reader.extend([
            tools.create_python_data_reader(
                lbann,
                current_file,
                'get_sample',
                'num_samples',
                'sample_dims',
                role,
            )
        ])
    return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
# Injecting them into the module globals lets pytest's collector
# discover each generated test by name.
for _test_func in tools.create_tests(setup_experiment, __file__):
    globals()[_test_func.__name__] = _test_func
| 27.715909 | 80 | 0.593276 | import functools
import operator
import os
import os.path
import sys
import numpy as np
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
input_size = 23
output_size = 15
seed = 202101280
def get_sample(index):
np.random.seed(seed+index)
values = [np.random.normal() for _ in range(input_size)]
indices = [
np.random.uniform(-1, input_size+1)
for _ in range(output_size)
]
return values + indices
def num_samples():
return 25
def sample_dims():
return (input_size+output_size,)
def setup_experiment(lbann):
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer
def construct_model(lbann):
x = lbann.Identity(lbann.Input())
x_slice = lbann.Slice(
x,
slice_points=tools.str_list([0,input_size,input_size+output_size]),
)
x0_weights = lbann.Weights(
optimizer=lbann.SGD(),
initializer=lbann.ConstantInitializer(value=0.0),
name='input_weights',
)
x0 = lbann.Sum(
lbann.Identity(x_slice),
lbann.WeightsLayer(weights=x0_weights, dims=tools.str_list(input_size)),
)
x1 = lbann.Identity(x_slice)
y0 = lbann.Gather(x0, x1)
y1 = lbann.Concatenation([
lbann.Constant(value=i+1, num_neurons='1')
for i in range(output_size)
])
y = lbann.Multiply(y0, y1)
z = lbann.L2Norm2(y)
layers = list(lbann.traverse_layer_graph(x))
metric = lbann.Metric(z, name='obj')
obj = lbann.ObjectiveFunction(z)
callbacks = []
vals = []
for i in range(num_samples()):
x = get_sample(i)
x0 = x[:input_size]
x1 = x[input_size:]
y0 = np.zeros(output_size)
for i in range(output_size):
if 0 <= x1[i] < input_size:
y0[i] = x0[int(x1[i])]
z = 0
for i in range(output_size):
z += ((i+1)*y0[i]) ** 2
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metric.name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
num_epochs = 0
return lbann.Model(num_epochs,
layers=layers,
objective_function=obj,
metrics=[metric],
callbacks=callbacks)
def construct_data_reader(lbann):
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func
| true | true |
f71ab703aedaaca8057f1f775130036f5d78f355 | 1,470 | py | Python | datasets/imagename_dataset.py | bigvideoresearch/SCC | f26cdb6aaf248b5112812dbdac1f1b5086aebccc | [
"MIT"
] | 5 | 2021-09-15T21:48:55.000Z | 2022-03-22T11:21:58.000Z | datasets/imagename_dataset.py | bigvideoresearch/SCC | f26cdb6aaf248b5112812dbdac1f1b5086aebccc | [
"MIT"
] | null | null | null | datasets/imagename_dataset.py | bigvideoresearch/SCC | f26cdb6aaf248b5112812dbdac1f1b5086aebccc | [
"MIT"
] | 1 | 2021-08-20T08:40:15.000Z | 2021-08-20T08:40:15.000Z | from runner_master import runner
import os
import io
import torch
import logging
from PIL import Image, ImageFile
from runner_master.runner.data import datasets
# to fix "OSError: image file is truncated"
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ImagenameDataset(datasets.ImglistDatasetV2):
    """Image-list dataset that also exposes each sample's image name.

    Each imglist line is expected to be ``"<image_name> <extra_str>"``;
    ``extra_str`` is parsed by ``self.transform_extra`` into additional
    sample entries (presumably labels -- confirm against the base class).
    """

    def getitem(self, index):
        """Load, decode and transform the sample at *index*.

        Returns:
            dict with at least 'image_name' and 'data' keys.
        Raises:
            RuntimeError: for malformed imglist lines or absolute paths
                combined with a non-empty root.
        """
        line = self.imglist[index].strip('\n')
        tokens = line.split(' ', maxsplit=1)
        if len(tokens) != 2:
            # A line without a separator cannot provide the extra string;
            # fail with a clear message instead of an opaque IndexError.
            raise RuntimeError('split tokens < 2: {}'.format(line))
        image_name, extra_str = tokens[0], tokens[1]
        if self.root != '' and image_name.startswith('/'):
            raise RuntimeError('root not empty but image_name starts with "/"')
        path = os.path.join(self.root, image_name)
        sample = dict()
        sample['image_name'] = image_name
        try:
            if not self.dummy_read:
                filebytes = self.reader(path)
                buff = io.BytesIO(filebytes)
            # NOTE(review): if dummy_read is True, dummy_size must also be
            # set, otherwise `buff` is undefined below -- confirm callers.
            if self.dummy_size is not None:
                sample['data'] = torch.rand(self.dummy_size)
            else:
                image = Image.open(buff)
                sample['data'] = self.transform_image(image)
            for key, value in self.transform_extra(extra_str).items():
                sample[key] = value
        except Exception:
            logging.error('[{}] broken'.format(path))
            # Bare raise preserves the original exception and traceback.
            raise
        return sample


runner.patch_dataset('ImagenameDataset', ImagenameDataset)
| 35 | 79 | 0.612245 | from runner_master import runner
import os
import io
import torch
import logging
from PIL import Image, ImageFile
from runner_master.runner.data import datasets
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ImagenameDataset(datasets.ImglistDatasetV2):
def getitem(self, index):
line = self.imglist[index].strip('\n')
tokens = line.split(' ', maxsplit=1)
image_name, extra_str = tokens[0], tokens[1]
if self.root != '' and image_name.startswith('/'):
raise RuntimeError('root not empty but image_name starts with "/"')
path = os.path.join(self.root, image_name)
sample = dict()
sample['image_name'] = image_name
try:
if not self.dummy_read:
filebytes = self.reader(path)
buff = io.BytesIO(filebytes)
if self.dummy_size is not None:
sample['data'] = torch.rand(self.dummy_size)
else:
image = Image.open(buff)
sample['data'] = self.transform_image(image)
for key, value in self.transform_extra(extra_str).items():
sample[key] = value
except Exception as e:
logging.error('[{}] broken'.format(path))
raise e
return sample
runner.patch_dataset('ImagenameDataset', ImagenameDataset)
| true | true |
f71ab7463ba7c30d460e7f06958ca0812996c4f2 | 1,439 | py | Python | setup.py | genevera/slack-backup | 0ffb9f940608c364249d027c0f96ecf08dd7e59a | [
"BSD-3-Clause"
] | null | null | null | setup.py | genevera/slack-backup | 0ffb9f940608c364249d027c0f96ecf08dd7e59a | [
"BSD-3-Clause"
] | null | null | null | setup.py | genevera/slack-backup | 0ffb9f940608c364249d027c0f96ecf08dd7e59a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
Setup for the slack-backup project
"""
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils on very old environments.
    from distutils.core import setup

# Trove classifiers describing supported Python versions and topics.
CLASSIFIERS = [
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.4",
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: End Users/Desktop",
    "License :: OSI Approved :: BSD License",
    "Operating System :: OS Independent",
    "Topic :: Internet :: WWW/HTTP",
    "Topic :: Database :: Front-Ends",
    "Topic :: Communications :: Chat",
    "Topic :: Text Processing :: Markup",
    "Topic :: Text Processing :: Markup :: HTML",
]

setup(
    name="slack-backup",
    packages=["slack_backup"],
    version="0.7",
    description="Make copy of slack converstaions",
    author="Roman Dobosz",
    author_email="gryf73@gmail.com",
    url="https://github.com/gryf/slack-backup",
    download_url="https://github.com/gryf/slack-backup",
    keywords=["chat", "backup", "history", "slack"],
    install_requires=["sqlalchemy", "slackclient"],
    scripts=["scripts/slack-backup"],
    classifiers=CLASSIFIERS,
    long_description=open("README.rst").read(),
    options={"test": {"verbose": False, "coverage": False}},
)
| 38.891892 | 65 | 0.551077 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name="slack-backup",
packages=["slack_backup"],
version="0.7",
description="Make copy of slack converstaions",
author="Roman Dobosz",
author_email="gryf73@gmail.com",
url="https://github.com/gryf/slack-backup",
download_url="https://github.com/gryf/slack-backup",
keywords=["chat", "backup", "history", "slack"],
install_requires=["sqlalchemy", "slackclient"],
scripts=["scripts/slack-backup"],
classifiers=["Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Database :: Front-Ends",
"Topic :: Communications :: Chat",
"Topic :: Text Processing :: Markup",
"Topic :: Text Processing :: Markup :: HTML"],
long_description=open("README.rst").read(),
options={'test': {'verbose': False,
'coverage': False}})
| true | true |
f71ab74bbb37c06ec87292445a3616dd3669f146 | 7,850 | py | Python | openprompt/prompts/one2one_verbalizer.py | hlzhang109/OpenPrompt | 8a1ec1ceac3805a11b09dda9b96ad7406d222f26 | [
"Apache-2.0"
] | null | null | null | openprompt/prompts/one2one_verbalizer.py | hlzhang109/OpenPrompt | 8a1ec1ceac3805a11b09dda9b96ad7406d222f26 | [
"Apache-2.0"
] | null | null | null | openprompt/prompts/one2one_verbalizer.py | hlzhang109/OpenPrompt | 8a1ec1ceac3805a11b09dda9b96ad7406d222f26 | [
"Apache-2.0"
] | null | null | null | import json
from transformers.tokenization_utils import PreTrainedTokenizer
from yacs.config import CfgNode
from openprompt.data_utils.data_utils import InputFeatures
import re
from openprompt import Verbalizer
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from openprompt.utils.logging import logger
class One2oneVerbalizer(Verbalizer):
    r"""
    The basic manually defined verbalizer class, this class is inherited from the :obj:`Verbalizer` class.
    This class restricts the use of label words to one word per label. For a verbalizer with fewer constraints,
    please use the basic ManualVerbalizer.
    Args:
        tokenizer (:obj:`PreTrainedTokenizer`): The tokenizer of the current pre-trained model to point out the vocabulary.
        classes (:obj:`classes`): The classes (or labels) of the current task.
        num_classes (:obj:`int`): Optional. The number of classes of the verbalizer. Only one of `classes` and `num_classes` should be used.
        label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
        prefix (:obj:`str`, optional): The prefix string of the verbalizer. (used in PLMs like RoBERTa, which is sensitive to prefix space)
        multi_token_handler (:obj:`str`, optional): The handling strategy for multiple tokens produced by the tokenizer.
    """
    def __init__(self,
                 tokenizer: PreTrainedTokenizer,
                 num_classes: Optional[int] = None,
                 classes: Optional[List] = None,
                 label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,
                 prefix: Optional[str] = " ",
                 multi_token_handler: Optional[str] = "first",
                ):
        super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)
        self.prefix = prefix
        self.multi_token_handler = multi_token_handler
        # Assigning label_words triggers on_label_words_set(), which adds
        # prefixes and builds the id/mask parameters.
        self.label_words = label_words
    def on_label_words_set(self):
        """Rebuild verbalizer parameters whenever the label words change."""
        super().on_label_words_set()
        self.label_words = self.add_prefix(self.label_words, self.prefix)
        self.generate_parameters()
    @staticmethod
    def add_prefix(label_words, prefix):
        r"""Add prefix to label words. For example, if a label words is in the middle of a template,
        the prefix should be ``' '``.
        Args:
            label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
            prefix (:obj:`str`, optional): The prefix string of the verbalizer.
        Returns:
            :obj:`Sequence[str]`: New label words with prefix.
        """
        new_label_words = []
        if isinstance(label_words[0], list):
            # Nested lists are tolerated only when each label has exactly
            # one word; otherwise a multi-word verbalizer should be used.
            assert max([len(w) for w in label_words]) == 1, "Providing multiple label words, you should use other verbalizers instead."
            label_words = [w[0] for w in label_words]
        for word in label_words:
            # A "<!>" marker suppresses the prefix for that word.
            if word.startswith("<!>"):
                new_label_words.append(word.split("<!>")[1])
            else:
                new_label_words.append(prefix + word)
        return new_label_words
    def generate_parameters(self) -> None:
        r"""In basic manual template, the parameters are generated from label words directly.
        In this implementation, the label_words should not be tokenized into more than one token.
        Stores the token ids and a validity mask as frozen nn.Parameters.
        """
        words_ids = []
        for word in self.label_words:
            word_ids = self.tokenizer.encode(word, add_special_tokens=False)
            if len(word_ids) > 1:
                logger.warning("Word {} is split into multiple tokens: {}. \
                If this is not what you expect, try using another word for this verbalizer" \
                    .format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))
            words_ids.append(word_ids)
        # Pad every id list to the same length; the mask marks real tokens.
        max_len = max([len(ids) for ids in words_ids])
        words_ids_mask = [[1]*len(ids) + [0]*(max_len-len(ids)) for ids in words_ids]
        words_ids = [ids+[0]*(max_len-len(ids)) for ids in words_ids]
        words_ids_tensor = torch.tensor(words_ids)
        words_ids_mask = torch.tensor(words_ids_mask)
        # Frozen parameters: they move with the module but are not trained.
        self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)
        self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)
    def project(self,
                logits: torch.Tensor,
                **kwargs,
                ) -> torch.Tensor:
        r"""
        Project the labels, the return value is the normalized (sum to 1) probs of label words.
        Args:
            logits (:obj:`torch.Tensor`): The orginal logits of label words.
        Returns:
            :obj:`torch.Tensor`: The normalized logits of label words
        """
        # Select the vocabulary logits at the label-word token positions.
        label_words_logits = logits[:, self.label_words_ids]
        label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)
        return label_words_logits
    def process_logits(self, logits: torch.Tensor, **kwargs):
        r"""A whole framework to process the original logits over the vocabulary, which contains four steps:
        (1) Project the logits into logits of label words
        (2) Normalize over all label words
        (3) Calibrate (optional)
        (4) Convert the probabilities back to logits
        Args:
            logits (:obj:`torch.Tensor`): The original logits.
        Returns:
            (:obj:`torch.Tensor`): The final processed logits over the label words set.
        """
        # project
        label_words_logits = self.project(logits, **kwargs) #Output: (batch_size, num_classes) or (batch_size, num_classes, num_label_words_per_label)
        # normalize
        label_words_probs = self.normalize(label_words_logits)
        # calibrate
        if hasattr(self, "_calibrate_logits") and self._calibrate_logits is not None:
            label_words_probs = self.calibrate(label_words_probs=label_words_probs)
        # convert to logits
        label_words_logits = torch.log(label_words_probs+1e-15)
        return label_words_logits
    def normalize(self, logits: torch.Tensor) -> torch.Tensor:
        """
        Given logits regarding the entire vocabulary, return the probs over the label words set.
        Args:
            logits (:obj:`Tensor`): The logits over the entire vocabulary.
        Returns:
            :obj:`Tensor`: The logits over the label words set.
        """
        batch_size = logits.shape[0]
        # Softmax over all label-word entries jointly, preserving shape.
        return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)
    def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:
        r"""Divide out the context-free label-word distribution and renormalize.
        Args:
            label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]
        Returns:
            :obj:`torch.Tensor`: The calibrated probability of label words.
        """
        shape = label_words_probs.shape
        assert self._calibrate_logits.dim() == 1, "self._calibrate_logits are not 1-d tensor"
        calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))
        assert calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:] \
            and calibrate_label_words_probs.shape[0]==1, "shape not match"
        label_words_probs /= (calibrate_label_words_probs+1e-15)
        # normalize # TODO Test the performance
        norm = label_words_probs.reshape(shape[0], -1).sum(dim=-1,keepdim=True) # TODO Test the performance of detaching()
        label_words_probs /= norm
        return label_words_probs
| 44.101124 | 183 | 0.643057 | import json
from transformers.tokenization_utils import PreTrainedTokenizer
from yacs.config import CfgNode
from openprompt.data_utils.data_utils import InputFeatures
import re
from openprompt import Verbalizer
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from openprompt.utils.logging import logger
class One2oneVerbalizer(Verbalizer):
def __init__(self,
tokenizer: PreTrainedTokenizer,
num_classes: Optional[int] = None,
classes: Optional[List] = None,
label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,
prefix: Optional[str] = " ",
multi_token_handler: Optional[str] = "first",
):
super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)
self.prefix = prefix
self.multi_token_handler = multi_token_handler
self.label_words = label_words
def on_label_words_set(self):
super().on_label_words_set()
self.label_words = self.add_prefix(self.label_words, self.prefix)
self.generate_parameters()
@staticmethod
def add_prefix(label_words, prefix):
new_label_words = []
if isinstance(label_words[0], list):
assert max([len(w) for w in label_words]) == 1, "Providing multiple label words, you should use other verbalizers instead."
label_words = [w[0] for w in label_words]
for word in label_words:
if word.startswith("<!>"):
new_label_words.append(word.split("<!>")[1])
else:
new_label_words.append(prefix + word)
return new_label_words
def generate_parameters(self) -> List:
words_ids = []
for word in self.label_words:
word_ids = self.tokenizer.encode(word, add_special_tokens=False)
if len(word_ids) > 1:
logger.warning("Word {} is split into multiple tokens: {}. \
If this is not what you expect, try using another word for this verbalizer" \
.format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))
words_ids.append(word_ids)
max_len = max([len(ids) for ids in words_ids])
words_ids_mask = [[1]*len(ids) + [0]*(max_len-len(ids)) for ids in words_ids]
words_ids = [ids+[0]*(max_len-len(ids)) for ids in words_ids]
words_ids_tensor = torch.tensor(words_ids)
words_ids_mask = torch.tensor(words_ids_mask)
self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)
self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)
def project(self,
logits: torch.Tensor,
**kwargs,
) -> torch.Tensor:
label_words_logits = logits[:, self.label_words_ids]
label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)
return label_words_logits
def process_logits(self, logits: torch.Tensor, **kwargs):
label_words_logits = self.project(logits, **kwargs)
label_words_probs = self.normalize(label_words_logits)
if hasattr(self, "_calibrate_logits") and self._calibrate_logits is not None:
label_words_probs = self.calibrate(label_words_probs=label_words_probs)
label_words_logits = torch.log(label_words_probs+1e-15)
return label_words_logits
def normalize(self, logits: torch.Tensor) -> torch.Tensor:
batch_size = logits.shape[0]
return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)
def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:
shape = label_words_probs.shape
assert self._calibrate_logits.dim() == 1, "self._calibrate_logits are not 1-d tensor"
calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))
assert calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:] \
and calibrate_label_words_probs.shape[0]==1, "shape not match"
label_words_probs /= (calibrate_label_words_probs+1e-15)
_probs.reshape(shape[0], -1).sum(dim=-1,keepdim=True)
label_words_probs /= norm
return label_words_probs
| true | true |
f71ab83062ace9e091517b08758d3a356d00ee8f | 643 | py | Python | CPSC362_Project1/migrations/versions/57642bbc5015_add_price.py | KonechyJ/CPSC-362_Project1 | c338f2e0e8e621e2fb1846277dcc0c1caaf1e41a | [
"MIT"
] | null | null | null | CPSC362_Project1/migrations/versions/57642bbc5015_add_price.py | KonechyJ/CPSC-362_Project1 | c338f2e0e8e621e2fb1846277dcc0c1caaf1e41a | [
"MIT"
] | null | null | null | CPSC362_Project1/migrations/versions/57642bbc5015_add_price.py | KonechyJ/CPSC-362_Project1 | c338f2e0e8e621e2fb1846277dcc0c1caaf1e41a | [
"MIT"
] | 2 | 2021-09-10T03:47:29.000Z | 2021-12-23T06:16:34.000Z | """Add price
Revision ID: 57642bbc5015
Revises: 6b66b7cc2f1f
Create Date: 2021-11-18 17:58:58.263480
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '57642bbc5015'
down_revision = '6b66b7cc2f1f'
branch_labels = None
depends_on = None
def upgrade():
    """Add the non-nullable ``price`` integer column to ``post``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): nullable=False without a server_default will fail on a
    # table that already contains rows -- confirm before deploying.
    op.add_column('post', sa.Column('price', sa.Integer(), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration by dropping the ``price`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('post', 'price')
    # ### end Alembic commands ###
| 22.172414 | 75 | 0.685848 | from alembic import op
import sqlalchemy as sa
revision = '57642bbc5015'
down_revision = '6b66b7cc2f1f'
branch_labels = None
depends_on = None
def upgrade():
| true | true |
f71ab8ed89dcd84727dfd18c9a588273b4b1ffe5 | 476 | py | Python | tests/container/elements.py | nadirhamid/protean | d31bc634e05c9221e82136bf18c2ceaa0982c1c8 | [
"BSD-3-Clause"
] | null | null | null | tests/container/elements.py | nadirhamid/protean | d31bc634e05c9221e82136bf18c2ceaa0982c1c8 | [
"BSD-3-Clause"
] | null | null | null | tests/container/elements.py | nadirhamid/protean | d31bc634e05c9221e82136bf18c2ceaa0982c1c8 | [
"BSD-3-Clause"
] | null | null | null | # Protean
from protean.core.field.basic import String
from protean.utils.container import BaseContainer
class CustomBaseContainer(BaseContainer):
    """Abstract-style container base; direct instantiation is forbidden."""

    def __new__(cls, *args, **kwargs):
        # Subclasses are constructed normally; only the base class itself
        # refuses to produce instances.
        if cls is not CustomBaseContainer:
            return super().__new__(cls)
        raise TypeError("CustomBaseContainer cannot be instantiated")
class CustomContainer(CustomBaseContainer):
    """Concrete test container with two required string fields."""
    # Both fields are mandatory and capped at 50 characters.
    foo = String(max_length=50, required=True)
    bar = String(max_length=50, required=True)
| 29.75 | 73 | 0.741597 |
from protean.core.field.basic import String
from protean.utils.container import BaseContainer
class CustomBaseContainer(BaseContainer):
def __new__(cls, *args, **kwargs):
if cls is CustomBaseContainer:
raise TypeError("CustomBaseContainer cannot be instantiated")
return super().__new__(cls)
class CustomContainer(CustomBaseContainer):
foo = String(max_length=50, required=True)
bar = String(max_length=50, required=True)
| true | true |
f71ab9c19de52f584719fbedb002bf798830562d | 544 | py | Python | py_pdf_term/endtoend/_endtoend/mappers/caches/xml.py | kumachan-mis/py-pdf-term | 282505826ce8c626003e753068d15738d772ce46 | [
"MIT"
] | null | null | null | py_pdf_term/endtoend/_endtoend/mappers/caches/xml.py | kumachan-mis/py-pdf-term | 282505826ce8c626003e753068d15738d772ce46 | [
"MIT"
] | 1 | 2021-08-02T13:02:12.000Z | 2021-08-02T13:02:12.000Z | py_pdf_term/endtoend/_endtoend/mappers/caches/xml.py | kumachan-mis/py-pdf-term | 282505826ce8c626003e753068d15738d772ce46 | [
"MIT"
] | null | null | null | from typing import Type
from ...caches import BaseXMLLayerCache, XMLLayerFileCache, XMLLayerNoCache
from ..base import BaseMapper
from ..consts import PACKAGE_NAME
class XMLLayerCacheMapper(BaseMapper[Type[BaseXMLLayerCache]]):
    """Maps fully-qualified names to XML-layer cache classes."""

    @classmethod
    def default_mapper(cls) -> "XMLLayerCacheMapper":
        """Build a mapper pre-populated with the built-in cache classes."""
        mapper = cls()
        for cache_cls in (XMLLayerNoCache, XMLLayerFileCache):
            mapper.add(f"{PACKAGE_NAME}.{cache_cls.__name__}", cache_cls)
        return mapper
| 30.222222 | 81 | 0.740809 | from typing import Type
from ...caches import BaseXMLLayerCache, XMLLayerFileCache, XMLLayerNoCache
from ..base import BaseMapper
from ..consts import PACKAGE_NAME
class XMLLayerCacheMapper(BaseMapper[Type[BaseXMLLayerCache]]):
    """Mapper from fully-qualified names to XML-layer cache classes."""

    @classmethod
    def default_mapper(cls) -> "XMLLayerCacheMapper":
        """Return a mapper pre-populated with the built-in cache classes."""
        default_mapper = cls()
        cache_clses = [XMLLayerNoCache, XMLLayerFileCache]
        for cache_cls in cache_clses:
            # Registration key is fully qualified: "<package>.<ClassName>".
            default_mapper.add(f"{PACKAGE_NAME}.{cache_cls.__name__}", cache_cls)
        return default_mapper
| true | true |
f71aba11c3ef384c490f493c022cda6fbf1433c8 | 3,220 | py | Python | rafter/blueprints.py | olivier-m/rafter | aafcf8fd019f24abcf519307c4484cc6b4697c04 | [
"MIT"
] | 1 | 2018-09-10T14:04:22.000Z | 2018-09-10T14:04:22.000Z | rafter/blueprints.py | olivier-m/rafter | aafcf8fd019f24abcf519307c4484cc6b4697c04 | [
"MIT"
] | null | null | null | rafter/blueprints.py | olivier-m/rafter | aafcf8fd019f24abcf519307c4484cc6b4697c04 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
.. autoclass:: Blueprint
"""
from sanic.blueprints import Blueprint as BaseBlueprint, FutureRoute
__all__ = ('Blueprint',)
class Blueprint(BaseBlueprint):
    """Create a new blueprint.

    :param name: unique name of the blueprint
    :param url_prefix: URL to be prefixed before all route URLs
    :param strict_slashes: strict to trailing slash

    .. automethod:: add_resource
    .. automethod:: resource
    """

    def __init__(self, *args, **kwargs):
        super(Blueprint, self).__init__(*args, **kwargs)
        # Deferred resource routes: (FutureRoute, extra kwargs) pairs that
        # are attached to the app only when register() runs.
        self.resources = []

    def register(self, app, options):
        """Attach all deferred resource routes to ``app``.

        Called by the app when the blueprint is registered on it.
        """
        super(Blueprint, self).register(app, options)

        # The prefix passed at registration time wins over the one given
        # at construction time.
        url_prefix = options.get('url_prefix', self.url_prefix)

        for future, kwargs in self.resources:
            future.handler.__blueprintname__ = self.name
            uri = url_prefix + future.uri if url_prefix else future.uri
            version = future.version or self.version
            # A "//" can appear when both the prefix and the uri carry a
            # slash; strip the duplicate before registering.
            app.resource(uri=uri[1:] if uri.startswith('//') else uri,
                         methods=future.methods,
                         host=future.host or self.host,
                         strict_slashes=future.strict_slashes,
                         stream=future.stream,
                         version=version,
                         name=future.name,
                         **kwargs)(future.handler)

    def resource(self, uri, methods=frozenset({'GET'}), host=None,
                 strict_slashes=None, stream=False, version=None, name=None,
                 **kwargs):
        """
        Create a blueprint resource route from a decorated function.

        :param uri: endpoint at which the route will be accessible.
        :param methods: list of acceptable HTTP methods.
        :param host:
        :param strict_slashes:
        :param version:
        :param name: user defined route name for url_for
        :return: function or class instance

        Accepts any keyword argument that will be passed to the app resource.
        """
        if strict_slashes is None:
            strict_slashes = self.strict_slashes

        def decorator(handler):
            # Only record the route here; actual registration happens in
            # register() once the blueprint is attached to an app.
            self.resources.append((
                FutureRoute(handler, uri, methods, host, strict_slashes,
                            stream, version, name),
                kwargs))
            return handler
        return decorator

    def add_resource(self, handler, uri, methods=frozenset({'GET'}),
                     host=None, strict_slashes=None, version=None, name=None,
                     **kwargs):
        """
        Create a blueprint resource route from a function.

        :param uri: endpoint at which the route will be accessible.
        :param methods: list of acceptable HTTP methods.
        :param host:
        :param strict_slashes:
        :param version:
        :param name: user defined route name for url_for
        :return: function or class instance

        Accepts any keyword argument that will be passed to the app resource.
        """
        self.resource(uri=uri, methods=methods, host=host,
                      strict_slashes=strict_slashes, version=version,
                      name=name, **kwargs)(handler)
| 34.623656 | 77 | 0.591925 |
from sanic.blueprints import Blueprint as BaseBlueprint, FutureRoute
__all__ = ('Blueprint',)
class Blueprint(BaseBlueprint):
    """Sanic blueprint variant that supports resource-style routes."""

    def __init__(self, *args, **kwargs):
        super(Blueprint, self).__init__(*args, **kwargs)
        # Deferred (FutureRoute, kwargs) pairs, registered in register().
        self.resources = []

    def register(self, app, options):
        """Attach all deferred resource routes to ``app``."""
        super(Blueprint, self).register(app, options)
        # Registration-time prefix overrides the constructor's one.
        url_prefix = options.get('url_prefix', self.url_prefix)
        for future, kwargs in self.resources:
            future.handler.__blueprintname__ = self.name
            uri = url_prefix + future.uri if url_prefix else future.uri
            version = future.version or self.version
            # Collapse an accidental leading "//" from prefix + uri.
            app.resource(uri=uri[1:] if uri.startswith('//') else uri,
                         methods=future.methods,
                         host=future.host or self.host,
                         strict_slashes=future.strict_slashes,
                         stream=future.stream,
                         version=version,
                         name=future.name,
                         **kwargs)(future.handler)

    def resource(self, uri, methods=frozenset({'GET'}), host=None,
                 strict_slashes=None, stream=False, version=None, name=None,
                 **kwargs):
        """Decorator form: record a resource route for later registration."""
        if strict_slashes is None:
            strict_slashes = self.strict_slashes

        def decorator(handler):
            self.resources.append((
                FutureRoute(handler, uri, methods, host, strict_slashes,
                            stream, version, name),
                kwargs))
            return handler
        return decorator

    def add_resource(self, handler, uri, methods=frozenset({'GET'}),
                     host=None, strict_slashes=None, version=None, name=None,
                     **kwargs):
        """Function form of :meth:`resource`."""
        self.resource(uri=uri, methods=methods, host=host,
                      strict_slashes=strict_slashes, version=version,
                      name=name, **kwargs)(handler)
| true | true |
f71aba25d68cbd1b1da66df6ca5eaabc6b86db83 | 1,244 | py | Python | setup.py | vtunr/VTun | f82b23945e95a3610e9bb7c54e62d0c51cac23a7 | [
"MIT"
] | 2 | 2020-04-14T19:14:07.000Z | 2022-02-14T14:49:44.000Z | setup.py | vtunr/VTun | f82b23945e95a3610e9bb7c54e62d0c51cac23a7 | [
"MIT"
] | 16 | 2020-01-20T10:19:17.000Z | 2022-01-15T18:05:55.000Z | setup.py | vtunr/VTunit | f82b23945e95a3610e9bb7c54e62d0c51cac23a7 | [
"MIT"
] | null | null | null | import setuptools
import subprocess
# Read the long description and dependency pins from the repository root.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Use a context manager so the requirements file handle is closed
# deterministically (the original leaked an open file object).
with open("requirements.txt", "r") as req:
    packages = [dep.rstrip('\n') for dep in req]
def get_git_version():
    """Return the git version string from ``git describe --dirty --tags``.

    ``subprocess.check_output`` returns ``bytes`` under Python 3; decode
    so ``setuptools.setup(version=...)`` receives a ``str`` on both
    Python 2 and 3.
    """
    out = subprocess.check_output(['git', 'describe', '--dirty', '--tags'])
    return out.decode('utf-8').strip()
setuptools.setup(
    name="VTunit",  # PyPI distribution name
    version=get_git_version(),
    author="Tony Martinet",
    author_email="tonymartinet@gmail.com",
    description="Unit test helper",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/vtunr/VTunit",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Console entry points: the main CLI plus the code generators.
    entry_points = {
        'console_scripts': ['vtunit=vtunit:main',
        'vtunit_cmake_generator=generator.mock_generator:main',
        'vtunit_test_runner_generator=generator.test_runner_generator:main',
        'vtunit_output_generator=generator.output_generator:main']
    },
    python_requires='>=2.7',
    install_requires=packages
) | 36.588235 | 96 | 0.653537 | import setuptools
import subprocess
# Read the long description and dependency pins from the repository root.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Use a context manager so the requirements file handle is closed
# deterministically (the original leaked an open file object).
with open("requirements.txt", "r") as req:
    packages = [dep.rstrip('\n') for dep in req]
def get_git_version():
    """Return the git version string from ``git describe --dirty --tags``.

    ``subprocess.check_output`` returns ``bytes`` under Python 3; decode
    so ``setuptools.setup(version=...)`` receives a ``str`` on both
    Python 2 and 3.
    """
    out = subprocess.check_output(['git', 'describe', '--dirty', '--tags'])
    return out.decode('utf-8').strip()
setuptools.setup(
    name="VTunit",  # PyPI distribution name
    version=get_git_version(),
    author="Tony Martinet",
    author_email="tonymartinet@gmail.com",
    description="Unit test helper",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/vtunr/VTunit",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Console entry points: the main CLI plus the code generators.
    entry_points = {
        'console_scripts': ['vtunit=vtunit:main',
        'vtunit_cmake_generator=generator.mock_generator:main',
        'vtunit_test_runner_generator=generator.test_runner_generator:main',
        'vtunit_output_generator=generator.output_generator:main']
    },
    python_requires='>=2.7',
    install_requires=packages
) | true | true |
f71abb077d128f03c4fd2fe2aa978ca83223d79e | 6,608 | py | Python | built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/timm/optim/radam.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/timm/optim/radam.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/timm/optim/radam.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Copyright [yyyy] [name of copyright owner]
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""RAdam Optimizer.
Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam
Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265
"""
import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
    """Rectified Adam (RAdam) optimizer.

    Implements the variance-rectification scheme of `On the Variance of
    the Adaptive Learning Rate and Beyond`
    (https://arxiv.org/abs/1908.03265). While the variance of the
    adaptive term is intractable (N_sma < 5), falls back to an
    un-adapted momentum update.

    Fix over the original: the positional ``(alpha, tensor)`` overloads
    of ``add_``/``addcmul_``/``addcdiv_`` are deprecated (and removed in
    recent PyTorch); the keyword forms used here are numerically
    identical.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Cache of (step, N_sma, step_size) keyed by step % 10: parameters
        # sharing a step count reuse the rectification terms.
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Parameters
        ----------
        closure : callable, optional
            Re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                # State and moments are kept in fp32 even for fp16 params.
                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Exponential moving averages of the gradient and its square.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = group['lr'] * math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                # Decoupled weight decay, scaled by the learning rate.
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
class PlainRAdam(Optimizer):
    """RAdam without the per-step rectification cache.

    Identical update rule to :class:`RAdam`, but recomputes the
    rectification terms every step instead of memoizing them.

    Fix over the original: uses the keyword ``alpha=``/``value=`` forms
    of ``add_``/``addcmul_``/``addcdiv_`` (the positional-alpha overloads
    are deprecated and removed in recent PyTorch); numerically identical.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Parameters
        ----------
        closure : callable, optional
            Re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                # State and moments are kept in fp32 even for fp16 params.
                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Exponential moving averages of the gradient and its square.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                beta2_t = beta2 ** state['step']
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)

                # Decoupled weight decay, scaled by the learning rate.
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    step_size = group['lr'] * math.sqrt(
                        (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                            N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    step_size = group['lr'] / (1 - beta1 ** state['step'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
| 39.100592 | 111 | 0.525272 |
import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
    """Rectified Adam (RAdam), per https://arxiv.org/abs/1908.03265.

    Falls back to an un-adapted momentum update while the variance of
    the adaptive term is intractable (N_sma < 5).
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Cache of (step, N_sma, step_size) keyed by step % 10.
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform one optimization step; ``closure`` optionally recomputes the loss."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                # Moments are kept in fp32 even for fp16 parameters.
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # NOTE(review): positional-alpha overloads of addcmul_/add_
                # are deprecated in newer PyTorch — confirm target version.
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma >= 5:
                        step_size = group['lr'] * math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                # Decoupled weight decay, scaled by the learning rate.
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
                else:
                    p_data_fp32.add_(-step_size, exp_avg)
                p.data.copy_(p_data_fp32)
        return loss
class PlainRAdam(Optimizer):
    """RAdam without the per-step rectification cache.

    Same update rule as :class:`RAdam`; recomputes the rectification
    terms every step instead of memoizing them.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform one optimization step; ``closure`` optionally recomputes the loss."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                # Moments are kept in fp32 even for fp16 parameters.
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # NOTE(review): positional-alpha overloads of addcmul_/add_
                # are deprecated in newer PyTorch — confirm target version.
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                state['step'] += 1
                beta2_t = beta2 ** state['step']
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                # Decoupled weight decay, scaled by the learning rate.
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
                # Rectified update only once the variance estimate is tractable.
                if N_sma >= 5:
                    step_size = group['lr'] * math.sqrt(
                        (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                            N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
                else:
                    step_size = group['lr'] / (1 - beta1 ** state['step'])
                    p_data_fp32.add_(-step_size, exp_avg)
                p.data.copy_(p_data_fp32)
        return loss
| true | true |
f71abbabdf4197e4dad1e27bc472d450790c4613 | 44,512 | py | Python | theano/gof/graph.py | MarcCote/Theano | f0d293161a624ccf10c60ee8405a92e7d321151a | [
"BSD-3-Clause"
] | null | null | null | theano/gof/graph.py | MarcCote/Theano | f0d293161a624ccf10c60ee8405a92e7d321151a | [
"BSD-3-Clause"
] | null | null | null | theano/gof/graph.py | MarcCote/Theano | f0d293161a624ccf10c60ee8405a92e7d321151a | [
"BSD-3-Clause"
] | 1 | 2019-09-09T18:31:41.000Z | 2019-09-09T18:31:41.000Z | """
Node classes (`Apply`, `Variable`) and expression graph algorithms.
"""
from __future__ import absolute_import, print_function, division
from collections import deque
from copy import copy
from itertools import count
import theano
from theano import config
from theano.gof import utils
from six import string_types, integer_types, iteritems
from theano.misc.ordered_set import OrderedSet
__docformat__ = "restructuredtext en"
# Lazy imports to avoid circular dependencies.
is_same_graph_with_merge = None
equal_computations = None
NoParams = object()
class Node(utils.object2):
    """Base class for the vertices (`Apply`, `Variable`) of a Theano graph.

    Edges are not stored explicitly: a node reaches its parents through
    ``Variable.owner`` / ``Apply.inputs`` and its children through
    ``Variable.clients`` / ``Apply.outputs``.
    """

    def get_parents(self):
        """Return a fresh list of this node's parents.

        Subclasses must return a copy, so that mutating the returned
        list cannot alter the graph structure itself.
        """
        raise NotImplementedError()
class Apply(Node):
    """
    An :term:`Apply` instance is a node in an expression graph which represents
    the application of an `Op` to some input `Variable` nodes, producing some
    output `Variable` nodes.

    This class is typically instantiated by an Op's make_node() function, which
    is typically called by that Op's __call__() function.

    An Apply instance serves as a simple structure with three important
    attributes:

    - :literal:`inputs` :  a list of `Variable` nodes that represent the
      arguments of the expression,

    - :literal:`outputs` : a list of `Variable` nodes that represent the
      variable of the expression, and

    - :literal:`op` : an `Op` instance that determines the nature of the
      expression being applied.

    The driver `compile.function` uses Apply's inputs attribute together with
    Variable's owner attribute to search the expression graph and determine
    which inputs are necessary to compute the function's outputs.

    A `Linker` uses the Apply instance's `op` field to compute the variables.

    Comparing with the Python language, an `Apply` instance is theano's version
    of a function call (or expression instance) whereas `Op` is theano's version
    of a function definition.

    Parameters
    ----------
    op : `Op` instance
    inputs : list of Variable instances
    outputs : list of Variable instances

    Notes
    -----
    The owner field of each output in the outputs list will be set to self.

    If an output element has an owner that is neither None nor self, then a
    ValueError exception will be raised.

    """

    def __init__(self, op, inputs, outputs):
        self.op = op
        self.inputs = []
        self.tag = utils.scratchpad()

        if not isinstance(inputs, (list, tuple)):
            raise TypeError("The inputs of an Apply must be a list or tuple")

        if not isinstance(outputs, (list, tuple)):
            raise TypeError("The output of an Apply must be a list or tuple")

        # filter inputs to make sure each element is a Variable
        for input in inputs:
            if isinstance(input, Variable):
                self.inputs.append(input)
            else:
                raise TypeError("The 'inputs' argument to Apply must contain Variable instances, not %s" % input)
        self.outputs = []
        # filter outputs to make sure each element is a Variable
        for i, output in enumerate(outputs):
            if isinstance(output, Variable):
                if output.owner is None:
                    # Take ownership of an orphan output and record its
                    # position in this node's output list.
                    output.owner = self
                    output.index = i
                elif output.owner is not self or output.index != i:
                    raise ValueError("All output variables passed to Apply must belong to it.")
                self.outputs.append(output)
            else:
                raise TypeError("The 'outputs' argument to Apply must contain Variable instances with no owner, not %s" % output)

    def run_params(self):
        """
        Returns the params for the node, or NoParams if no params is set.
        """
        if hasattr(self.op, 'get_params'):
            return self.op.get_params(self)
        return NoParams

    def __getstate__(self):
        # Prepare state for pickling.
        d = self.__dict__
        # ufunc don't pickle/unpickle well
        if hasattr(self.tag, 'ufunc'):
            # Copy the dict so dropping the ufunc does not mutate self.
            d = copy(self.__dict__)
            t = d["tag"]
            del t.ufunc
            d["tag"] = t
        return d

    def default_output(self):
        """
        Returns the default output for this node.

        Returns
        -------
        Variable instance
            An element of self.outputs, typically self.outputs[0].

        Notes
        -----
        May raise AttributeError self.op.default_output is out of range, or if
        there are multiple outputs and self.op.default_output does not exist.

        """
        do = getattr(self.op, 'default_output', None)
        if do is None:
            # Without an explicit default, only a single-output node has
            # an unambiguous default output.
            if len(self.outputs) == 1:
                return self.outputs[0]
            else:
                raise AttributeError(
                    "%s.default_output should be an output index." % self.op)
        elif not isinstance(do, integer_types):
            raise AttributeError("%s.default_output should be an int or long" %
                                 self.op)
        elif do < 0 or do >= len(self.outputs):
            raise AttributeError("%s.default_output is out of range." %
                                 self.op)
        return self.outputs[do]

    out = property(default_output,
                   doc="alias for self.default_output()")
    """
    Alias for self.default_output().

    """

    def __str__(self):
        return op_as_string(self.inputs, self)

    def __repr__(self):
        return str(self)

    def __asapply__(self):
        return self

    def clone(self):
        """
        Duplicate this Apply instance with inputs = self.inputs.

        Returns
        -------
        object
            A new Apply instance (or subclass instance) with new outputs.

        Notes
        -----
        Tags are copied from self to the returned instance.

        """
        cp = self.__class__(self.op, self.inputs,
                            [output.clone() for output in self.outputs])
        cp.tag = copy(self.tag)
        return cp

    def clone_with_new_inputs(self, inputs, strict=True):
        """
        Duplicate this Apply instance in a new graph.

        Parameters
        ----------
        inputs
            List of Variable instances to use as inputs.
        strict : bool
            If True, the type fields of all the inputs must be equal
            to the current ones (or compatible, for instance Tensor /
            CudaNdarray of the same dtype and broadcastable patterns,
            in which case they will be converted into current Type), and
            returned outputs are guaranteed to have the same types as
            self.outputs.  If False, then there's no guarantee that the
            clone's outputs will have the same types as self.outputs,
            and cloning may not even be possible (it depends on the Op).

        Returns
        -------
        object
            An Apply instance with the same op but different outputs.

        """
        assert isinstance(inputs, (list, tuple))
        remake_node = False
        new_inputs = inputs[:]
        for i, (curr, new) in enumerate(zip(self.inputs, new_inputs)):
            if not curr.type == new.type:
                if strict:
                    # If compatible, casts new into curr.type
                    new_inputs[i] = curr.type.filter_variable(new)
                else:
                    # Incompatible type and not strict: let the Op rebuild
                    # the node (output types may change).
                    remake_node = True
        if remake_node:
            new_node = self.op.make_node(*new_inputs)
            new_node.tag = copy(self.tag).__update__(new_node.tag)
        else:
            new_node = self.clone()
            new_node.inputs = new_inputs
        return new_node

    def get_parents(self):
        return list(self.inputs)

    # convenience properties
    nin = property(lambda self: len(self.inputs), doc='same as len(self.inputs)')
    """
    Property: Number of inputs.

    """
    nout = property(lambda self: len(self.outputs), doc='same as len(self.outputs)')
    """
    Property: Number of outputs.

    """
    params_type = property(lambda self: self.op.params_type, doc='type to use for the params')
class Variable(Node):
    """
    A :term:`Variable` is a node in an expression graph that represents a
    variable.

    The inputs and outputs of every `Apply` (theano.gof.Apply) are `Variable`
    instances. The input and output arguments to create a `function` are also
    `Variable` instances. A `Variable` is like a strongly-typed variable in
    some other languages; each `Variable` contains a reference to a `Type`
    instance that defines the kind of value the `Variable` can take in a
    computation.

    A `Variable` is a container for four important attributes:

    - :literal:`type` a `Type` instance defining the kind of value this
      `Variable` can have,

    - :literal:`owner` either None (for graph roots) or the `Apply` instance
      of which `self` is an output,

    - :literal:`index` the integer such that :literal:`owner.outputs[index] is
      this_variable` (ignored if `owner` is None),

    - :literal:`name` a string to use in pretty-printing and debugging.

    There are a few kinds of Variables to be aware of: A Variable which is the
    output of a symbolic computation has a reference to the Apply instance to
    which it belongs (property: owner) and the position of itself in the owner's
    output list (property: index).

    - `Variable` (this base type) is typically the output of a symbolic
      computation.

    - `Constant` (a subclass) which adds a default and un-replaceable
      :literal:`value`, and requires that owner is None.

    - `TensorVariable` subclass of Variable that represents a numpy.ndarray
       object.

    - `TensorSharedVariable` Shared version of TensorVariable.

    - `SparseVariable` subclass of Variable that represents
      a scipy.sparse.{csc,csr}_matrix object.

    - `CudaNdarrayVariable` subclass of Variable that represents our object on
      the GPU that is a subset of numpy.ndarray.

    - `RandomVariable`.

    A Variable which is the output of a symbolic computation will have an owner
    not equal to None.

    Using the Variables' owner field and the Apply nodes' inputs fields, one can
    navigate a graph from an output all the way to the inputs. The opposite
    direction is not possible until a FunctionGraph has annotated the Variables
    with the clients field, ie, before the compilation process has begun a
    Variable does not know which Apply nodes take it as input.

    Parameters
    ----------
    type : a Type instance
        The type governs the kind of data that can be associated with this
        variable.
    owner : None or Apply instance
        The Apply instance which computes the value for this variable.
    index : None or int
        The position of this Variable in owner.outputs.
    name : None or str
        A string for pretty-printing and debugging.

    Examples
    --------

    .. code-block:: python

        import theano
        from theano import tensor

        a = tensor.constant(1.5)        # declare a symbolic constant
        b = tensor.fscalar()            # declare a symbolic floating-point scalar

        c = a + b                       # create a simple expression

        f = theano.function([b], [c])   # this works because a has a value associated with it already

        assert 4.0 == f(2.5)            # bind 2.5 to an internal copy of b and evaluate an internal c

        theano.function([a], [c])       # compilation error because b (required by c) is undefined

        theano.function([a,b], [c])     # compilation error because a is constant, it can't be an input

        d = tensor.value(1.5)           # create a value similar to the constant 'a'
        e = d + b
        theano.function([d,b], [e])     # this works.  d's default value of 1.5 is ignored.

    The python variables :literal:`a,b,c` all refer to instances of type
    `Variable`. The `Variable` refered to by `a` is also an instance of
    `Constant`.

    `compile.function` uses each `Apply` instance's `inputs` attribute together
    with each Variable's `owner` field to determine which inputs are necessary
    to compute the function's outputs.

    """

    # __slots__ = ['type', 'owner', 'index', 'name']
    # Shared counter used to give every Variable a unique auto_name.
    __count__ = count(0)

    def __init__(self, type, owner=None, index=None, name=None):
        super(Variable, self).__init__()

        self.tag = utils.scratchpad()
        self.type = type
        if owner is not None and not isinstance(owner, Apply):
            raise TypeError("owner must be an Apply instance", owner)
        self.owner = owner
        if index is not None and not isinstance(index, integer_types):
            raise TypeError("index must be an int", index)
        self.index = index
        if name is not None and not isinstance(name, string_types):
            raise TypeError("name must be a string", name)
        self.name = name
        self.auto_name = 'auto_' + str(next(self.__count__))

    def __str__(self):
        """Return a str representation of the Variable.

        """
        if self.name is not None:
            return self.name
        if self.owner is not None:
            op = self.owner.op
            if self.index == op.default_output:
                return str(self.owner.op) + ".out"
            else:
                return str(self.owner.op) + "." + str(self.index)
        else:
            return "<%s>" % str(self.type)

    def __repr_test_value__(self):
        """Return a repr of the test value.

        Return a printable representation of the test value. It can be
        overridden by classes with non printable test_value to provide a
        suitable representation of the test_value.
        """
        return repr(theano.gof.op.get_test_value(self))

    def __repr__(self, firstPass=True):
        """Return a repr of the Variable.

        Return a printable name or description of the Variable. If
        config.print_test_value is True it will also print the test_value if
        any.
        """
        to_print = [str(self)]
        if config.print_test_value and firstPass:
            try:
                to_print.append(self.__repr_test_value__())
            except AttributeError:
                # No test value attached to this Variable; print name only.
                pass
        return '\n'.join(to_print)

    def clone(self):
        """
        Return a new Variable like self.

        Returns
        -------
        Variable instance
            A new Variable instance (or subclass instance) with no owner or
            index.

        Notes
        -----
        Tags are copied to the returned instance.

        Name is copied to the returned instance.

        """
        # return copy(self)
        cp = self.__class__(self.type, None, None, self.name)
        cp.tag = copy(self.tag)
        return cp

    def __lt__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __lt__',
                                  self.__class__.__name__)

    def __le__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __le__',
                                  self.__class__.__name__)

    def __gt__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __gt__',
                                  self.__class__.__name__)

    def __ge__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __ge__',
                                  self.__class__.__name__)

    def get_parents(self):
        if self.owner is not None:
            return [self.owner]
        return []

    def eval(self, inputs_to_values=None):
        """
        Evaluates this variable.

        Parameters
        ----------
        inputs_to_values
            A dictionary mapping theano Variables to values.

        Examples
        --------

        >>> import numpy as np
        >>> import theano.tensor as T
        >>> x = T.dscalar('x')
        >>> y = T.dscalar('y')
        >>> z = x + y
        >>> np.allclose(z.eval({x : 16.3, y : 12.1}), 28.4)
        True

        We passed :func:`eval` a dictionary mapping symbolic theano
        variables to the values to substitute for them, and it returned
        the numerical value of the expression.

        Notes
        -----

        `eval` will be slow the first time you call it on a variable --
        it needs to call :func:`function` to compile the expression behind
        the scenes. Subsequent calls to :func:`eval` on that same variable
        will be fast, because the variable caches the compiled function.

        This way of computing has more overhead than a normal Theano
        function, so don't use it too much in real scripts.
        """
        if inputs_to_values is None:
            inputs_to_values = {}

        if not hasattr(self, '_fn_cache'):
            self._fn_cache = dict()

        # Cache the compiled function keyed on the (id-sorted) input set,
        # so repeated eval() calls with the same inputs are fast.
        inputs = tuple(sorted(inputs_to_values.keys(), key=id))
        if inputs not in self._fn_cache:
            self._fn_cache[inputs] = theano.function(inputs, self)
        args = [inputs_to_values[param] for param in inputs]

        rval = self._fn_cache[inputs](*args)

        return rval

    def __getstate__(self):
        d = self.__dict__.copy()
        # Compiled functions are not picklable; drop the cache.
        d.pop("_fn_cache", None)
        return d
class Constant(Variable):
    """
    A :term:`Constant` is a `Variable` with a `value` field that cannot be
    changed at runtime.

    Constant nodes make eligible numerous optimizations: constant inlining in
    C code, constant folding, etc.

    Notes
    -----
    The data field is filtered by what is provided in the constructor for the
    Constant's type field.

    A Constant can never be the output of a computation, so its ``owner``
    is always None and its ``index`` attribute is never defined.

    """

    # __slots__ = ['data']

    def __init__(self, type, data, name=None):
        Variable.__init__(self, type, None, None, name)
        # Validate/convert the raw data through the Type before storing it.
        self.data = type.filter(data)
        utils.add_tag_trace(self)

    def equals(self, other):
        # this does what __eq__ should do, but Variable and Apply should always be hashable by id
        return isinstance(other, Constant) and self.signature() == other.signature()

    def signature(self):
        return (self.type, self.data)

    def merge_signature(self):
        return self.signature()

    def __str__(self):
        if self.name is not None:
            return self.name
        else:
            name = str(self.data)
            # Abbreviate long data reprs to keep graph printouts readable.
            if len(name) > 20:
                name = name[:10] + '...' + name[-10:]
            return 'Constant{%s}' % name

    def clone(self):
        """
        We clone this object, but we don't clone the data to lower memory
        requirement. We suppose that the data will never change.

        """
        cp = self.__class__(self.type, self.data, self.name)
        cp.tag = copy(self.tag)
        return cp

    def __set_owner(self, value):
        """
        Setter for the ``owner`` property.

        Raises
        ------
        ValueError
            If `value` is not `None`.

        """
        if value is not None:
            raise ValueError("Constant instances cannot have an owner.")

    owner = property(lambda self: None, __set_owner)
    value = property(lambda self: self.data, doc='read-only data access method')

    # index is not defined, because the `owner` attribute must necessarily be None
def stack_search(start, expand, mode='bfs', build_inv=False):
    """
    Search through a graph, either breadth- or depth-first.

    Parameters
    ----------
    start : deque
        Nodes to search from; consumed by the search.
    expand : callable
        Called on each visited node; must return a list of neighbours to
        visit next, or None.
    mode : str
        Either 'bfs' or 'dfs'.
    build_inv : bool
        If True, additionally return a dict mapping each expanded node
        to the list of nodes it was reached from.

    Returns
    -------
    list of `Variable` or `Apply` instances (depends on `expand`)
        Nodes in order of traversal; each node appears at most once even
        if it occurs several times in `start`.

    :postcondition: every element of start is transferred to the returned list.
    :postcondition: start is empty.
    """
    if mode == 'bfs':
        pop = start.popleft
    elif mode == 'dfs':
        pop = start.pop
    else:
        raise ValueError('mode should be bfs or dfs', mode)
    seen_ids = set()
    order = []
    inverse = {}
    while start:
        node = pop()
        # Deduplicate by identity, not equality.
        if id(node) in seen_ids:
            continue
        order.append(node)
        seen_ids.add(id(node))
        neighbours = expand(node)
        if neighbours:
            if build_inv:
                for nb in neighbours:
                    inverse.setdefault(nb, []).append(node)
            start.extend(neighbours)
    assert len(order) == len(seen_ids)
    if build_inv:
        return order, inverse
    return order
def ancestors(variable_list, blockers=None):
    """
    Return the variables that contribute to those in variable_list
    (inclusive).

    Parameters
    ----------
    variable_list : list of `Variable` instances
        Output `Variable` instances from which to search backward
        through owners.
    blockers : container, optional
        Variables whose owners are not traversed.

    Returns
    -------
    list of `Variable` instances
        All reachable variables, in the order found by a left-recursive
        depth-first search started at the nodes in `variable_list`.
    """
    def up(var):
        # Stop at graph roots and at blocked variables.
        if var.owner is None:
            return None
        if blockers and var in blockers:
            return None
        # Reversed so the DFS (stack-based) visits inputs left-to-right.
        return reversed(var.owner.inputs)
    return stack_search(deque(variable_list), up, 'dfs')
def inputs(variable_list, blockers=None):
    """
    Return the inputs required to compute the given Variables.

    Parameters
    ----------
    variable_list : list of `Variable` instances
        Output `Variable` instances from which to search backward
        through owners.
    blockers : container, optional
        Variables whose owners are not traversed.

    Returns
    -------
    list of `Variable` instances
        Ownerless nodes, in the order found by a left-recursive
        depth-first search started at the nodes in `variable_list`.
    """
    return [v for v in ancestors(variable_list, blockers)
            if v.owner is None]
def variables_and_orphans(i, o):
    """
    List the variables lying between inputs `i` and outputs `o` (found
    by DFS), together with the orphans among them (ownerless variables
    that are not inputs).

    Parameters
    ----------
    i : list
        Input variables.
    o : list
        Output variables.
    """
    def walk(r):
        # Do not expand past the declared inputs or past graph roots.
        if r.owner is None or r in i:
            return None
        around = list(r.owner.inputs) + list(r.owner.outputs)
        around.reverse()
        return around
    found = stack_search(deque(o), walk, 'dfs')
    loose = [v for v in found if v.owner is None and v not in i]
    return found, loose
def ops(i, o):
    """
    Set of Apply nodes contained within the subgraph between i and o.

    Parameters
    ----------
    i : list
        Input variables.
    o : list
        Output variables.

    Returns
    -------
    set
        The Apply nodes contained in the subgraph between i and o,
        including the owners of the variables in o and intermediary
        nodes, but not the owners of the variables in i.
    """
    all_vars, loose = variables_and_orphans(i, o)
    return set(v.owner for v in all_vars
               if v not in i and v not in loose and v.owner is not None)
def variables(i, o):
    """
    Return the Variables involved in the subgraph that lies between i
    and o: this includes i, o, orphans(i, o) and all intermediate values.

    Parameters
    ----------
    i : list
        Input variables.
    o : list
        Output variables.
    """
    found, _ = variables_and_orphans(i, o)
    return found
def orphans(i, o):
    """
    Return the Variables which one or more Variables in `o` depend on
    but which are neither in `i` nor produced inside the subgraph
    between `i` and `o`.

    Parameters
    ----------
    i : list
        Input Variables.
    o : list
        Output Variables.

    Examples
    --------
    orphans([x], [(x+y).out]) => [y]
    """
    _, loose = variables_and_orphans(i, o)
    return loose
def clone(i, o, copy_inputs=True):
    """
    Copy the subgraph contained between i and o.

    Parameters
    ----------
    i : list
        Input Variables.
    o : list
        Output Variables.
    copy_inputs : bool
        If True, the inputs will be copied (defaults to True).

    Returns
    -------
    tuple of (list, list)
        The cloned inputs and the cloned outputs, in the same order.
    """
    mapping = clone_get_equiv(i, o, copy_inputs)
    new_i = [mapping[x] for x in i]
    new_o = [mapping[x] for x in o]
    return new_i, new_o
def clone_get_equiv(inputs, outputs, copy_inputs_and_orphans=True, memo=None):
    """
    Return a dictionary that maps from Variable and Apply nodes in the
    original graph to a new node (a clone) in a new graph.
    This function works by recursively cloning inputs... rebuilding a directed
    graph from the inputs up to eventually building new outputs.
    Parameters
    ----------
    inputs : a list of Variables
    outputs : a list of Variables
    copy_inputs_and_orphans : bool
        True means to create the cloned graph from new input and constant
        nodes (the bottom of a feed-upward graph).
        False means to clone a graph that is rooted at the original input
        nodes.
    memo : None or dict
        Optionally start with a partly-filled dictionary for the return value.
        If a dictionary is passed, this function will work in-place on that
        dictionary and return it.

    Returns
    -------
    dict
        The (possibly pre-seeded) memo mapping each original
        Variable/Apply node to its clone.
    """
    if memo is None:
        memo = {}
    # clone the inputs if necessary (setdefault respects pre-seeded entries)
    for input in inputs:
        if copy_inputs_and_orphans:
            cpy = input.clone()
            # A cloned input becomes a graph root: detach it from any producer.
            cpy.owner = None
            cpy.index = None
            memo.setdefault(input, cpy)
        else:
            memo.setdefault(input, input)
    # go through the inputs -> outputs graph cloning as we go
    # (io_toposort guarantees a node's inputs are processed before the node)
    for apply in io_toposort(inputs, outputs):
        for input in apply.inputs:
            if input not in memo:
                # Reached a variable not listed in `inputs`: an orphan.
                if copy_inputs_and_orphans:
                    cpy = input.clone()
                    memo[input] = cpy
                else:
                    memo[input] = input
        new_apply = apply.clone_with_new_inputs([memo[i] for i in apply.inputs])
        memo.setdefault(apply, new_apply)
        for output, new_output in zip(apply.outputs, new_apply.outputs):
            memo.setdefault(output, new_output)
    # finish up by cloning any remaining outputs (it can happen)
    for output in outputs:
        if output not in memo:
            memo[output] = output.clone()
    return memo
def general_toposort(r_out, deps, debug_print=False,
                     compute_deps_cache=None, deps_cache=None,
                     clients=None):
    """
    Perform a topological sort of all nodes reachable from `r_out`
    through the dependence relation given by `deps`.

    Parameters
    ----------
    r_out : tuple, list or deque
        Nodes to start the search from.
    deps
        A python function that takes a node as input and returns its dependence.
    debug_print : bool
        If True, print the reachable set and the partial order before
        raising on a cycle.
    compute_deps_cache : optional
        If provided deps_cache should also be provided. This is a function like
        deps, but that also cache its results in a dict passed as deps_cache.
    deps_cache : dict
        Must be used with compute_deps_cache.
    clients : dict
        If a dict is passed it will be filled with a mapping of node
        -> clients for each node in the subgraph.

    Returns
    -------
    list
        All reachable nodes, topologically sorted (dependencies first).

    Raises
    ------
    ValueError
        If the graph contains a cycle.

    Notes
    -----
    deps(i) should behave like a pure function (no funny business with
    internal state).
    deps(i) will be cached by this function (to be fast).
    The order of the return value list is determined by the order of nodes
    returned by the deps() function.
    deps should be provided or can be None and the caller provides
    compute_deps_cache and deps_cache. The second option removes a Python
    function call, and allows for more specialized code, so it can be
    faster.
    """
    if compute_deps_cache is None:
        deps_cache = {}
        # Default caching wrapper around `deps`; rejects unordered
        # collections so the resulting order is deterministic.
        def compute_deps_cache(io):
            if io not in deps_cache:
                d = deps(io)
                if d:
                    if not isinstance(d, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    deps_cache[io] = list(d)
                else:
                    deps_cache[io] = d
                return d
            else:
                return deps_cache[io]
    assert deps_cache is not None
    assert isinstance(r_out, (tuple, list, deque))
    # DFS to find every reachable node; _clients maps dep -> dependents.
    reachable, _clients = stack_search(deque(r_out), compute_deps_cache,
                                       'dfs', True)
    if clients is not None:
        clients.update(_clients)
    # Kahn-style sort: start from nodes with no (remaining) dependencies.
    sources = deque([r for r in reachable if not deps_cache.get(r, None)])
    rset = set()
    rlist = []
    while sources:
        node = sources.popleft()
        if node not in rset:
            rlist.append(node)
            rset.add(node)
            for client in _clients.get(node, []):
                # Remove `node` from each client's pending dependencies;
                # a client with none left becomes a new source.
                deps_cache[client] = [a for a in deps_cache[client]
                                      if a is not node]
                if not deps_cache[client]:
                    sources.append(client)
    if len(rlist) != len(reachable):
        # Some nodes never became sources: there must be a cycle.
        if debug_print:
            print('')
            print(reachable)
            print(rlist)
        raise ValueError('graph contains cycles')
    return rlist
def io_toposort(inputs, outputs, orderings=None, clients=None):
    """
    Perform topological sort from input and output nodes.

    Parameters
    ----------
    inputs : list or tuple of Variable instances
        Variables below which the sort does not descend.
    outputs : list or tuple of Apply instances
        Nodes from which the reachable subgraph is computed.
    orderings : dict
        Key: Apply instance. Value: list of Apply instance.
        It is important that the value be a container with a deterministic
        iteration order. No sets allowed!
    clients : dict
        If a dict is provided it will be filled with mappings of
        node->clients for each node in the subgraph that is sorted.

    Returns
    -------
    list of Apply instances
        The Apply nodes of the subgraph, dependencies first.
    """
    # the inputs are used only here in the function that decides what 'predecessors' to explore
    iset = set(inputs)
    # We build 2 functions as a speed up
    deps_cache = {}
    compute_deps = None
    compute_deps_cache = None
    if not orderings:  # can be None or empty dict
        # Specialized function that is faster when no ordering.
        # Also include the cache in the function itself for speed up.
        def compute_deps_cache(obj):
            if obj in deps_cache:
                return deps_cache[obj]
            rval = []
            if obj not in iset:
                # A Variable depends on its producing Apply node; an
                # Apply node depends on its input Variables.
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                if rval:
                    if not isinstance(rval, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    deps_cache[obj] = list(rval)
                else:
                    deps_cache[obj] = rval
            else:
                deps_cache[obj] = rval
            return rval
    else:
        # Slower variant: extend the structural dependencies with the
        # caller-supplied extra orderings.
        def compute_deps(obj):
            rval = []
            if obj not in iset:
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                rval.extend(orderings.get(obj, []))
            else:
                # Declared inputs must not carry extra orderings.
                assert not orderings.get(obj, [])
            return rval
    topo = general_toposort(outputs, deps=compute_deps,
                            compute_deps_cache=compute_deps_cache,
                            deps_cache=deps_cache, clients=clients)
    # Keep only the Apply nodes (the sort also contains Variables).
    return [o for o in topo if isinstance(o, Apply)]
# Leaves are rendered with plain str() by default.
default_leaf_formatter = str
def default_node_formatter(op, argstrings):
    """Render an Apply node as ``op(arg1, arg2, ...)``."""
    args = ", ".join(argstrings)
    return "%s(%s)" % (op.op, args)
def io_connection_pattern(inputs, outputs):
    """
    Returns the connection pattern of a subgraph defined by given
    inputs and outputs.

    Parameters
    ----------
    inputs : list of Variable
        Inputs of the subgraph.
    outputs : list of Variable
        Outputs of the subgraph.

    Returns
    -------
    list of list of bool
        Element [i][j] is True iff output j depends on input i.
    """
    inner_nodes = io_toposort(inputs, outputs)
    # Initialize 'connect_pattern_by_var' by establishing each input as
    # connected only to itself
    connect_pattern_by_var = {}
    nb_inputs = len(inputs)
    for i in range(nb_inputs):
        input = inputs[i]
        inp_connection_pattern = [i == j for j in range(nb_inputs)]
        connect_pattern_by_var[input] = inp_connection_pattern
    # Iterate through the nodes used to produce the outputs from the
    # inputs and, for every node, infer their connection pattern to
    # every input from the connection patterns of their parents.
    for n in inner_nodes:
        # Get the connection pattern of the inner node's op. If the op
        # does not define a connection_pattern method, assume that
        # every node output is connected to every node input
        try:
            op_connection_pattern = n.op.connection_pattern(n)
        except AttributeError:
            op_connection_pattern = ([[True] * len(n.outputs)] *
                                     len(n.inputs))
        # For every output of the inner node, figure out which inputs it
        # is connected to by combining the connection pattern of the inner
        # node and the connection patterns of the inner node's inputs.
        for out_idx in range(len(n.outputs)):
            out = n.outputs[out_idx]
            out_connection_pattern = [False] * nb_inputs
            for inp_idx in range(len(n.inputs)):
                inp = n.inputs[inp_idx]
                if inp in connect_pattern_by_var:
                    inp_connection_pattern = connect_pattern_by_var[inp]
                    # If the node output is connected to the node input, it
                    # means it is connected to every inner input that the
                    # node inputs is connected to
                    if op_connection_pattern[inp_idx][out_idx]:
                        # Element-wise OR of the two boolean patterns.
                        out_connection_pattern = [out_connection_pattern[i] or
                                                  inp_connection_pattern[i]
                                                  for i in range(nb_inputs)]
            # Store the connection pattern of the node output
            connect_pattern_by_var[out] = out_connection_pattern
    # Obtain the global connection pattern by combining the
    # connnection patterns of the individual outputs
    global_connection_pattern = [[] for o in range(len(inputs))]
    for out in outputs:
        out_connection_pattern = connect_pattern_by_var.get(out)
        if out_connection_pattern is None:
            # the output is completely isolated from inputs
            out_connection_pattern = [False] * len(inputs)
        for i in range(len(inputs)):
            global_connection_pattern[i].append(out_connection_pattern[i])
    return global_connection_pattern
def is_same_graph(var1, var2, givens=None, debug=False):
    """
    Return True iff Variables `var1` and `var2` perform the same computation.
    By 'performing the same computation', we mean that they must share the same
    graph, so that for instance this function will return False when comparing
    (x * (y * z)) with ((x * y) * z).
    The current implementation is not efficient since, when possible, it
    verifies equality by calling two different functions that are expected to
    return the same output. The goal is to verify this assumption, to
    eventually get rid of one of them in the future.
    Parameters
    ----------
    var1
        The first Variable to compare.
    var2
        The second Variable to compare.
    givens
        Similar to the `givens` argument of `theano.function`, it can be used
        to perform substitutions in the computational graph of `var1` and
        `var2`. This argument is associated to neither `var1` nor `var2`:
        substitutions may affect both graphs if the substituted variable
        is present in both.
    debug : bool
        If True, then an exception is raised when we are in a situation where
        the `equal_computations` implementation cannot be called.
        This parameter is intended to be used in tests only, to make sure we
        properly test both implementations.

    Returns
    -------
    bool
        True iff both variables perform the same computation.

    Examples
    --------
    ====== ====== ====== ======
    var1   var2   givens output
    ====== ====== ====== ======
    x + 1  x + 1  {}     True
    x + 1  y + 1  {}     False
    x + 1  y + 1  {x: y} True
    ====== ====== ====== ======
    """
    # Lazy import.
    if givens is None:
        givens = {}
    global equal_computations, is_same_graph_with_merge
    if equal_computations is None:
        from theano.gof.opt import is_same_graph_with_merge
        from theano.scan_module.scan_utils import equal_computations
    # Convert `givens` to dictionary.
    if not isinstance(givens, dict):
        givens = dict(givens)
    # Get result from the merge-based function.
    rval1 = is_same_graph_with_merge(var1=var1, var2=var2, givens=givens)
    # Get result from the function `equal_computations` from scan_utils.
    use_equal_computations = True
    if givens:
        # We need to build the `in_xs` and `in_ys` lists. To do this, we need
        # to be able to tell whether a variable belongs to the computational
        # graph of `var1` or `var2`.
        # The typical case we want to handle is when `to_replace` belongs to
        # one of these graphs, and `replace_by` belongs to the other one. In
        # other situations, the current implementation of `equal_computations`
        # is probably not appropriate, so we do not call it.
        ok = True
        in_xs = []
        in_ys = []
        # Compute the sets of all variables found in each computational graph.
        inputs_var = list(map(inputs, ([var1], [var2])))
        all_vars = [set(variables(v_i, v_o))
                    for v_i, v_o in ((inputs_var[0], [var1]),
                                     (inputs_var[1], [var2]))]
        def in_var(x, k):
            # Return True iff `x` is in computation graph of variable `vark`.
            return x in all_vars[k - 1]
        for to_replace, replace_by in iteritems(givens):
            # Map a substitution variable to the computational graphs it
            # belongs to.
            inside = dict((v, [in_var(v, k) for k in (1, 2)])
                          for v in (to_replace, replace_by))
            if (inside[to_replace][0] and not inside[to_replace][1] and
                    inside[replace_by][1] and not inside[replace_by][0]):
                # Substitute variable in `var1` by one from `var2`.
                in_xs.append(to_replace)
                in_ys.append(replace_by)
            elif (inside[to_replace][1] and not inside[to_replace][0] and
                    inside[replace_by][0] and not inside[replace_by][1]):
                # Substitute variable in `var2` by one from `var1`.
                in_xs.append(replace_by)
                in_ys.append(to_replace)
            else:
                # Substitution crosses graphs in an unsupported way.
                ok = False
                break
        if not ok:
            # We cannot directly use `equal_computations`.
            if debug:
                raise AssertionError(
                    'When `debug` is True we want to make sure we are also '
                    'using the `equal_computations` implementation')
            use_equal_computations = False
    else:
        in_xs = None
        in_ys = None
    if use_equal_computations:
        # Cross-check: both implementations must agree.
        rval2 = equal_computations(xs=[var1], ys=[var2],
                                   in_xs=in_xs, in_ys=in_ys)
        assert rval2 == rval1
    return rval1
def op_as_string(i, op,
                 leaf_formatter=default_leaf_formatter,
                 node_formatter=default_node_formatter):
    """
    Return a string representation of the subgraph between `i` and the
    Apply node `op`, formatted with the given formatters.
    """
    arg_strings = as_string(i, op.inputs, leaf_formatter, node_formatter)
    return node_formatter(op, arg_strings)
def as_string(i, o,
              leaf_formatter=default_leaf_formatter,
              node_formatter=default_node_formatter):
    """
    Returns a string representation of the subgraph between i and o
    Parameters
    ----------
    i : list
        Input `Variable` s.
    o : list
        Output `Variable` s.
    leaf_formatter : callable
        Takes a `Variable` and returns a string to describe it.
    node_formatter : callable
        Takes an `Op` and the list of strings corresponding to its arguments
        and returns a string to describe it.
    Returns
    -------
    str
        Returns a string representation of the subgraph between i and o. If the
        same op is used by several other ops, the first occurrence will be
        marked as :literal:`*n -> description` and all subsequent occurrences
        will be marked as :literal:`*n`, where n is an id number (ids are
        attributed in an unspecified order and only exist for viewing
        convenience).
    """
    i = set(i)
    orph = orphans(i, o)
    # First pass: find Apply nodes referenced more than once ("multi"),
    # so they can be printed once and referenced by id afterwards.
    multi = set()
    seen = set()
    for output in o:
        op = output.owner
        if op in seen:
            multi.add(op)
        else:
            seen.add(op)
    for op in ops(i, o):
        for input in op.inputs:
            op2 = input.owner
            if input in i or input in orph or op2 is None:
                continue
            if op2 in seen:
                multi.add(op2)
            else:
                seen.add(input.owner)
    # Fix an (arbitrary) order for the ids used in the output.
    multi = [x for x in multi]
    done = set()
    def multi_index(x):
        # 1-based id of a shared node in the rendered string.
        return multi.index(x) + 1
    def describe(r):
        # Recursively render variable `r`; inputs and orphans are leaves.
        if r.owner is not None and r not in i and r not in orph:
            op = r.owner
            idx = op.outputs.index(r)
            if len(op.outputs) == 1:
                idxs = ""
            else:
                # Disambiguate which output of a multi-output node.
                idxs = "::%i" % idx
            if op in done:
                # Already printed in full: emit a back-reference.
                return "*%i%s" % (multi_index(op), idxs)
            else:
                done.add(op)
                s = node_formatter(op, [describe(input) for input in op.inputs])
                if op in multi:
                    return "*%i -> %s" % (multi_index(op), s)
                else:
                    return s
        else:
            return leaf_formatter(r)
    return [describe(output) for output in o]
def view_roots(r):
    """
    Follow consecutive ``view_map`` s backward and return the leaf
    variables that `r` is ultimately a view of ([r] itself when it is
    not a view).
    """
    node = r.owner
    if node is None:
        return [r]
    try:
        vmap = node.op.view_map
        # Re-key the map by output Variable instead of output index.
        vmap = dict((node.outputs[o], i)
                    for o, i in iteritems(vmap))
    except AttributeError:
        # Op defines no view_map: `r` is its own root.
        return [r]
    if r not in vmap:
        return [r]
    roots = []
    for idx in vmap[r]:
        roots += view_roots(node.inputs[idx])
    return roots
def list_of_nodes(inputs, outputs):
    """
    Return the Apply nodes of the graph between `inputs` and `outputs`,
    found by a BFS from the outputs' owners that stops at nodes
    producing any of `inputs`.
    """
    def predecessors(node):
        # Owners of the node's inputs, excluding producers of `inputs`.
        return [inp.owner for inp in node.inputs
                if inp.owner and
                not any(i in inp.owner.outputs for i in inputs)]
    return stack_search(deque([o.owner for o in outputs]), predecessors)
| 32.898744 | 129 | 0.596671 | from __future__ import absolute_import, print_function, division
from collections import deque
from copy import copy
from itertools import count
import theano
from theano import config
from theano.gof import utils
from six import string_types, integer_types, iteritems
from theano.misc.ordered_set import OrderedSet
__docformat__ = "restructuredtext en"
is_same_graph_with_merge = None
equal_computations = None
NoParams = object()
class Node(utils.object2):
def get_parents(self):
raise NotImplementedError()
class Apply(Node):
def __init__(self, op, inputs, outputs):
self.op = op
self.inputs = []
self.tag = utils.scratchpad()
if not isinstance(inputs, (list, tuple)):
raise TypeError("The inputs of an Apply must be a list or tuple")
if not isinstance(outputs, (list, tuple)):
raise TypeError("The output of an Apply must be a list or tuple")
for input in inputs:
if isinstance(input, Variable):
self.inputs.append(input)
else:
raise TypeError("The 'inputs' argument to Apply must contain Variable instances, not %s" % input)
self.outputs = []
for i, output in enumerate(outputs):
if isinstance(output, Variable):
if output.owner is None:
output.owner = self
output.index = i
elif output.owner is not self or output.index != i:
raise ValueError("All output variables passed to Apply must belong to it.")
self.outputs.append(output)
else:
raise TypeError("The 'outputs' argument to Apply must contain Variable instances with no owner, not %s" % output)
def run_params(self):
if hasattr(self.op, 'get_params'):
return self.op.get_params(self)
return NoParams
def __getstate__(self):
d = self.__dict__
if hasattr(self.tag, 'ufunc'):
d = copy(self.__dict__)
t = d["tag"]
del t.ufunc
d["tag"] = t
return d
def default_output(self):
do = getattr(self.op, 'default_output', None)
if do is None:
if len(self.outputs) == 1:
return self.outputs[0]
else:
raise AttributeError(
"%s.default_output should be an output index." % self.op)
elif not isinstance(do, integer_types):
raise AttributeError("%s.default_output should be an int or long" %
self.op)
elif do < 0 or do >= len(self.outputs):
raise AttributeError("%s.default_output is out of range." %
self.op)
return self.outputs[do]
out = property(default_output,
doc="alias for self.default_output()")
def __str__(self):
return op_as_string(self.inputs, self)
def __repr__(self):
return str(self)
def __asapply__(self):
return self
def clone(self):
cp = self.__class__(self.op, self.inputs,
[output.clone() for output in self.outputs])
cp.tag = copy(self.tag)
return cp
def clone_with_new_inputs(self, inputs, strict=True):
assert isinstance(inputs, (list, tuple))
remake_node = False
new_inputs = inputs[:]
for i, (curr, new) in enumerate(zip(self.inputs, new_inputs)):
if not curr.type == new.type:
if strict:
# If compatible, casts new into curr.type
new_inputs[i] = curr.type.filter_variable(new)
else:
remake_node = True
if remake_node:
new_node = self.op.make_node(*new_inputs)
new_node.tag = copy(self.tag).__update__(new_node.tag)
else:
new_node = self.clone()
new_node.inputs = new_inputs
return new_node
def get_parents(self):
return list(self.inputs)
# convenience properties
nin = property(lambda self: len(self.inputs), doc='same as len(self.inputs)')
nout = property(lambda self: len(self.outputs), doc='same as len(self.outputs)')
params_type = property(lambda self: self.op.params_type, doc='type to use for the params')
class Variable(Node):
# __slots__ = ['type', 'owner', 'index', 'name']
__count__ = count(0)
def __init__(self, type, owner=None, index=None, name=None):
super(Variable, self).__init__()
self.tag = utils.scratchpad()
self.type = type
if owner is not None and not isinstance(owner, Apply):
raise TypeError("owner must be an Apply instance", owner)
self.owner = owner
if index is not None and not isinstance(index, integer_types):
raise TypeError("index must be an int", index)
self.index = index
if name is not None and not isinstance(name, string_types):
raise TypeError("name must be a string", name)
self.name = name
self.auto_name = 'auto_' + str(next(self.__count__))
def __str__(self):
if self.name is not None:
return self.name
if self.owner is not None:
op = self.owner.op
if self.index == op.default_output:
return str(self.owner.op) + ".out"
else:
return str(self.owner.op) + "." + str(self.index)
else:
return "<%s>" % str(self.type)
def __repr_test_value__(self):
return repr(theano.gof.op.get_test_value(self))
def __repr__(self, firstPass=True):
to_print = [str(self)]
if config.print_test_value and firstPass:
try:
to_print.append(self.__repr_test_value__())
except AttributeError:
pass
return '\n'.join(to_print)
def clone(self):
# return copy(self)
cp = self.__class__(self.type, None, None, self.name)
cp.tag = copy(self.tag)
return cp
def __lt__(self, other):
raise NotImplementedError('Subclasses of Variable must provide __lt__',
self.__class__.__name__)
def __le__(self, other):
raise NotImplementedError('Subclasses of Variable must provide __le__',
self.__class__.__name__)
def __gt__(self, other):
raise NotImplementedError('Subclasses of Variable must provide __gt__',
self.__class__.__name__)
def __ge__(self, other):
raise NotImplementedError('Subclasses of Variable must provide __ge__',
self.__class__.__name__)
def get_parents(self):
if self.owner is not None:
return [self.owner]
return []
def eval(self, inputs_to_values=None):
if inputs_to_values is None:
inputs_to_values = {}
if not hasattr(self, '_fn_cache'):
self._fn_cache = dict()
inputs = tuple(sorted(inputs_to_values.keys(), key=id))
if inputs not in self._fn_cache:
self._fn_cache[inputs] = theano.function(inputs, self)
args = [inputs_to_values[param] for param in inputs]
rval = self._fn_cache[inputs](*args)
return rval
def __getstate__(self):
d = self.__dict__.copy()
d.pop("_fn_cache", None)
return d
class Constant(Variable):
# __slots__ = ['data']
def __init__(self, type, data, name=None):
Variable.__init__(self, type, None, None, name)
self.data = type.filter(data)
utils.add_tag_trace(self)
def equals(self, other):
# this does what __eq__ should do, but Variable and Apply should always be hashable by id
return isinstance(other, Constant) and self.signature() == other.signature()
def signature(self):
return (self.type, self.data)
def merge_signature(self):
return self.signature()
def __str__(self):
if self.name is not None:
return self.name
else:
name = str(self.data)
if len(name) > 20:
name = name[:10] + '...' + name[-10:]
return 'Constant{%s}' % name
def clone(self):
cp = self.__class__(self.type, self.data, self.name)
cp.tag = copy(self.tag)
return cp
def __set_owner(self, value):
if value is not None:
raise ValueError("Constant instances cannot have an owner.")
owner = property(lambda self: None, __set_owner)
value = property(lambda self: self.data, doc='read-only data access method')
# index is not defined, because the `owner` attribute must necessarily be None
def stack_search(start, expand, mode='bfs', build_inv=False):
if mode not in ('bfs', 'dfs'):
raise ValueError('mode should be bfs or dfs', mode)
rval_set = set()
rval_list = list()
if mode == 'bfs':
start_pop = start.popleft
else:
start_pop = start.pop
expand_inv = {}
while start:
l = start_pop()
if id(l) not in rval_set:
rval_list.append(l)
rval_set.add(id(l))
expand_l = expand(l)
if expand_l:
if build_inv:
for r in expand_l:
expand_inv.setdefault(r, []).append(l)
start.extend(expand_l)
assert len(rval_list) == len(rval_set)
if build_inv:
return rval_list, expand_inv
return rval_list
def ancestors(variable_list, blockers=None):
def expand(r):
if r.owner and (not blockers or r not in blockers):
return reversed(r.owner.inputs)
dfs_variables = stack_search(deque(variable_list), expand, 'dfs')
return dfs_variables
def inputs(variable_list, blockers=None):
vlist = ancestors(variable_list, blockers)
rval = [r for r in vlist if r.owner is None]
return rval
def variables_and_orphans(i, o):
def expand(r):
if r.owner and r not in i:
l = list(r.owner.inputs) + list(r.owner.outputs)
l.reverse()
return l
variables = stack_search(deque(o), expand, 'dfs')
orphans = [r for r in variables if r.owner is None and r not in i]
return variables, orphans
def ops(i, o):
ops = set()
variables, orphans = variables_and_orphans(i, o)
for r in variables:
if r not in i and r not in orphans:
if r.owner is not None:
ops.add(r.owner)
return ops
def variables(i, o):
return variables_and_orphans(i, o)[0]
def orphans(i, o):
return variables_and_orphans(i, o)[1]
def clone(i, o, copy_inputs=True):
equiv = clone_get_equiv(i, o, copy_inputs)
return [equiv[input] for input in i], [equiv[output] for output in o]
def clone_get_equiv(inputs, outputs, copy_inputs_and_orphans=True, memo=None):
if memo is None:
memo = {}
# clone the inputs if necessary
for input in inputs:
if copy_inputs_and_orphans:
cpy = input.clone()
cpy.owner = None
cpy.index = None
memo.setdefault(input, cpy)
else:
memo.setdefault(input, input)
# go through the inputs -> outputs graph cloning as we go
for apply in io_toposort(inputs, outputs):
for input in apply.inputs:
if input not in memo:
if copy_inputs_and_orphans:
cpy = input.clone()
memo[input] = cpy
else:
memo[input] = input
new_apply = apply.clone_with_new_inputs([memo[i] for i in apply.inputs])
memo.setdefault(apply, new_apply)
for output, new_output in zip(apply.outputs, new_apply.outputs):
memo.setdefault(output, new_output)
# finish up by cloning any remaining outputs (it can happen)
for output in outputs:
if output not in memo:
memo[output] = output.clone()
return memo
def general_toposort(r_out, deps, debug_print=False,
compute_deps_cache=None, deps_cache=None,
clients=None):
if compute_deps_cache is None:
deps_cache = {}
def compute_deps_cache(io):
if io not in deps_cache:
d = deps(io)
if d:
if not isinstance(d, (list, OrderedSet)):
raise TypeError(
"Non-deterministic collections here make"
" toposort non-deterministic.")
deps_cache[io] = list(d)
else:
deps_cache[io] = d
return d
else:
return deps_cache[io]
assert deps_cache is not None
assert isinstance(r_out, (tuple, list, deque))
reachable, _clients = stack_search(deque(r_out), compute_deps_cache,
'dfs', True)
if clients is not None:
clients.update(_clients)
sources = deque([r for r in reachable if not deps_cache.get(r, None)])
rset = set()
rlist = []
while sources:
node = sources.popleft()
if node not in rset:
rlist.append(node)
rset.add(node)
for client in _clients.get(node, []):
deps_cache[client] = [a for a in deps_cache[client]
if a is not node]
if not deps_cache[client]:
sources.append(client)
if len(rlist) != len(reachable):
if debug_print:
print('')
print(reachable)
print(rlist)
raise ValueError('graph contains cycles')
return rlist
def io_toposort(inputs, outputs, orderings=None, clients=None):
    """Topologically sort the graph between `inputs` and `outputs`.

    Returns the list of Apply nodes needed to compute `outputs`, in an
    order that respects dependencies.  `orderings`, if given, maps an
    object to a list of extra predecessors that must be scheduled before
    it.  `clients`, if given, is filled in with the reverse-dependency
    map by general_toposort.
    """
    # the inputs are used only here in the function that decides what 'predecessors' to explore
    iset = set(inputs)
    # We build 2 functions as a speed up
    deps_cache = {}
    compute_deps = None
    compute_deps_cache = None
    if not orderings:  # can be None or empty dict
        # Specialized function that is faster when no ordering.
        # Also include the cache in the function itself for speed up.
        def compute_deps_cache(obj):
            # Memoized dependency lookup: a Variable depends on the Apply
            # that produced it; an Apply depends on its input Variables.
            if obj in deps_cache:
                return deps_cache[obj]
            rval = []
            if obj not in iset:
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                if rval:
                    if not isinstance(rval, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    deps_cache[obj] = list(rval)
                else:
                    deps_cache[obj] = rval
            else:
                # graph inputs have no predecessors
                deps_cache[obj] = rval
            return rval
    else:
        def compute_deps(obj):
            # Uncached variant: merges the graph structure with the extra
            # `orderings` constraints.
            rval = []
            if obj not in iset:
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                rval.extend(orderings.get(obj, []))
            else:
                # graph inputs may not carry extra ordering constraints
                assert not orderings.get(obj, [])
            return rval
    topo = general_toposort(outputs, deps=compute_deps,
                            compute_deps_cache=compute_deps_cache,
                            deps_cache=deps_cache, clients=clients)
    # keep only the Apply nodes (general_toposort also returns Variables)
    return [o for o in topo if isinstance(o, Apply)]
# Default formatter used by as_string()/op_as_string(): leaves (graph
# inputs and orphans) are rendered with plain str().
default_leaf_formatter = str
def default_node_formatter(op, argstrings):
    """Render an apply node as 'op(arg1, arg2, ...)'."""
    joined_args = ", ".join(argstrings)
    return "%s(%s)" % (op.op, joined_args)
def io_connection_pattern(inputs, outputs):
    """Return the connection pattern of the subgraph between `inputs`
    and `outputs`: a list with one entry per input, each entry being a
    list of booleans (one per output) telling whether that output is
    connected to that input.
    """
    num_inputs = len(inputs)

    # Each input starts out connected only to itself.
    conn = {}
    for pos, inp in enumerate(inputs):
        conn[inp] = [pos == j for j in range(num_inputs)]

    # Walk the nodes in topological order and propagate connectivity
    # from every node's inputs to its outputs.
    for node in io_toposort(inputs, outputs):
        # Ops without a connection_pattern method are treated as fully
        # connected (every output depends on every input).
        try:
            op_pattern = node.op.connection_pattern(node)
        except AttributeError:
            op_pattern = [[True] * len(node.outputs)] * len(node.inputs)

        for out_pos, out in enumerate(node.outputs):
            out_pattern = [False] * num_inputs
            for in_pos, inp in enumerate(node.inputs):
                if inp in conn:
                    in_pattern = conn[inp]
                    # An output connected to this node input inherits
                    # every subgraph input that input is connected to.
                    if op_pattern[in_pos][out_pos]:
                        out_pattern = [a or b for a, b in
                                       zip(out_pattern, in_pattern)]
            conn[out] = out_pattern

    # Assemble the per-input global pattern from the per-output ones.
    result = [[] for _ in range(num_inputs)]
    for out in outputs:
        pattern = conn.get(out)
        if pattern is None:
            # output completely isolated from the inputs
            pattern = [False] * num_inputs
        for pos in range(num_inputs):
            result[pos].append(pattern[pos])
    return result
def is_same_graph(var1, var2, givens=None, debug=False):
    """Return True iff `var1` and `var2` perform the same computation,
    possibly after the substitutions described by `givens` (a mapping
    from a variable belonging to one graph to its replacement belonging
    to the other graph).

    The answer is computed with the merge-based implementation and,
    whenever applicable, cross-checked against `equal_computations`;
    both implementations must agree.  With debug=True an AssertionError
    is raised when the cross-check cannot be performed.
    """
    # Lazy import.
    if givens is None:
        givens = {}
    global equal_computations, is_same_graph_with_merge
    if equal_computations is None:
        from theano.gof.opt import is_same_graph_with_merge
        from theano.scan_module.scan_utils import equal_computations
    # Convert `givens` to dictionary.
    if not isinstance(givens, dict):
        givens = dict(givens)
    # Get result from the merge-based function.
    rval1 = is_same_graph_with_merge(var1=var1, var2=var2, givens=givens)
    # Get result from the function `equal_computations` from scan_utils.
    use_equal_computations = True
    if givens:
        # We need to build the `in_xs` and `in_ys` lists. To do this, we need
        # to be able to tell whether a variable belongs to the computational
        # graph of `var1` or `var2`.
        # The typical case we want to handle is when `to_replace` belongs to
        # one of these graphs, and `replace_by` belongs to the other one. In
        # other situations, the current implementation of `equal_computations`
        # is probably not appropriate, so we do not call it.
        ok = True
        in_xs = []
        in_ys = []
        # Compute the sets of all variables found in each computational graph.
        inputs_var = list(map(inputs, ([var1], [var2])))
        all_vars = [set(variables(v_i, v_o))
                    for v_i, v_o in ((inputs_var[0], [var1]),
                                     (inputs_var[1], [var2]))]
        def in_var(x, k):
            # Return True iff `x` is in computation graph of variable `vark`.
            return x in all_vars[k - 1]
        for to_replace, replace_by in iteritems(givens):
            # Map a substitution variable to the computational graphs it
            # belongs to.
            inside = dict((v, [in_var(v, k) for k in (1, 2)])
                          for v in (to_replace, replace_by))
            if (inside[to_replace][0] and not inside[to_replace][1] and
                    inside[replace_by][1] and not inside[replace_by][0]):
                # Substitute variable in `var1` by one from `var2`.
                in_xs.append(to_replace)
                in_ys.append(replace_by)
            elif (inside[to_replace][1] and not inside[to_replace][0] and
                    inside[replace_by][0] and not inside[replace_by][1]):
                # Substitute variable in `var2` by one from `var1`.
                in_xs.append(replace_by)
                in_ys.append(to_replace)
            else:
                # substitution crosses graphs in an unsupported way
                ok = False
                break
        if not ok:
            # We cannot directly use `equal_computations`.
            if debug:
                raise AssertionError(
                    'When `debug` is True we want to make sure we are also '
                    'using the `equal_computations` implementation')
            use_equal_computations = False
    else:
        in_xs = None
        in_ys = None
    if use_equal_computations:
        rval2 = equal_computations(xs=[var1], ys=[var2],
                                   in_xs=in_xs, in_ys=in_ys)
        # the two implementations must agree
        assert rval2 == rval1
    return rval1
def op_as_string(i, op,
                 leaf_formatter=default_leaf_formatter,
                 node_formatter=default_node_formatter):
    """Return a textual rendering of apply node `op`, with its inputs
    rendered recursively by as_string()."""
    input_strings = as_string(i, op.inputs, leaf_formatter, node_formatter)
    return node_formatter(op, input_strings)
def as_string(i, o,
              leaf_formatter=default_leaf_formatter,
              node_formatter=default_node_formatter):
    """Return one rendered string per output in `o`, describing the graph
    between inputs `i` and outputs `o`.

    Apply nodes reachable through more than one path are expanded only
    once ('*K -> ...') and referenced as '*K' afterwards.
    """
    i = set(i)
    orph = orphans(i, o)

    # Collect the apply nodes reachable through several paths; these get
    # a '*K' label so they are expanded only once.
    multi = set()
    seen = set()
    for out in o:
        owner = out.owner
        if owner in seen:
            multi.add(owner)
        else:
            seen.add(owner)
    for node in ops(i, o):
        for inp in node.inputs:
            parent = inp.owner
            if inp in i or inp in orph or parent is None:
                continue
            if parent in seen:
                multi.add(parent)
            else:
                seen.add(parent)
    multi = list(multi)
    done = set()

    def multi_index(node):
        # 1-based label for shared subgraphs
        return multi.index(node) + 1

    def describe(r):
        owner = r.owner
        if owner is None or r in i or r in orph:
            return leaf_formatter(r)
        out_pos = owner.outputs.index(r)
        # annotate multi-output nodes with the output index
        suffix = "" if len(owner.outputs) == 1 else "::%i" % out_pos
        if owner in done:
            return "*%i%s" % (multi_index(owner), suffix)
        done.add(owner)
        rendered = node_formatter(owner,
                                  [describe(inp) for inp in owner.inputs])
        if owner in multi:
            return "*%i -> %s" % (multi_index(owner), rendered)
        return rendered

    return [describe(out) for out in o]
def view_roots(r):
    """Return the root variables reached by following `view_map` entries
    from `r`; `r` itself when its owner's op has no view_map or does not
    view `r` onto any input."""
    node = r.owner
    if node is None:
        return [r]
    try:
        vmap = node.op.view_map
        # re-key the map by output variable instead of output index
        vmap = dict((node.outputs[o], i)
                    for o, i in iteritems(vmap))
    except AttributeError:
        # op has no view_map: r is its own root
        return [r]
    if r not in vmap:
        return [r]
    roots = []
    for in_pos in vmap[r]:
        roots += view_roots(node.inputs[in_pos])
    return roots
def list_of_nodes(inputs, outputs):
    """Return the apply nodes of the graph between `inputs` and
    `outputs`, found by a stack search from the outputs' owners."""
    def expand(node):
        # predecessors of `node`, stopping at owners of the graph inputs
        return [inp.owner for inp in node.inputs
                if inp.owner and
                not any(i in inp.owner.outputs for i in inputs)]
    start = deque([o.owner for o in outputs])
    return stack_search(start, expand)
| true | true |
f71abc3456290bc451a8706a0ce886a5e488584f | 5,121 | py | Python | main.py | maxime-tournier/lcpy | 1419901a089cd088edd078397958b47c25cc2f4f | [
"MIT"
] | null | null | null | main.py | maxime-tournier/lcpy | 1419901a089cd088edd078397958b47c25cc2f4f | [
"MIT"
] | null | null | null | main.py | maxime-tournier/lcpy | 1419901a089cd088edd078397958b47c25cc2f4f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import krylov
import splitting
import tool
import numpy as np
import math
import metric
import accel
def parse_args():
    """Build the benchmark's command-line parser and parse sys.argv."""
    import argparse
    parser = argparse.ArgumentParser(description='benchmark LCP solvers')
    parser.add_argument('filenames', nargs='+', help='filename for LCP data')
    # optional flags: (names, add_argument keyword options)
    options = [
        (('-n', '--iter'),
         dict(type=int, default=100, help='iteration count')),
        (('--fontsize',),
         dict(type=int, default=8, help='font size in plot')),
        (('--legend',),
         dict(type=int, default=1, help='enable/disable legend in plots')),
        (('--ms',),
         dict(type=int, default=0, help='use mass-splitting when available')),
        (('--eps',),
         dict(type=float, default=1e-8, help='precision')),
        (('-i', '--interactive'),
         dict(action='store_true',
              help='drop to a python shell once finished')),
    ]
    for flags, opts in options:
        parser.add_argument(*flags, **opts)
    return parser.parse_args()
def pgs(x, lcp, **kwargs):
    """pgs"""
    # projected Gauss-Seidel iteration, delegated to the splitting module
    (M, q) = lcp
    return splitting.pgs(x, M, -q, **kwargs)
def pjacobi(x, lcp, **kwargs):
    """pjacobi"""
    # projected Jacobi iteration, delegated to the splitting module
    (M, q) = lcp
    return splitting.pjacobi(x, M, -q, **kwargs)
def solvers(lcp, **kwargs):
    """solver list"""
    (M, q) = lcp
    n = q.size
    ms = kwargs.get('ms', None)

    # base iteration: pjacobi deliberately shadows pgs
    s = pgs
    s = pjacobi

    def mass_splitting(x, sub_lcp, **kw):
        """mass-splitting"""
        return s(x, sub_lcp, diag=ms, omega=kw.get('omega', 1.0))
    # the docstring doubles as the plot label, hence the decoration
    mass_splitting.__doc__ += ' ({})'.format(s.__doc__)

    base = s if ms is None else mass_splitting  # kept for experimentation
    n = q.size
    if ms is not None:
        opts = {'diag': ms, 'omega': kwargs.get('omega', 1.0)}
    else:
        opts = {'diag': np.diag(M), 'omega': 2.0 / n}

    def wrap(solver, **fixed):
        # bind extra keyword options while preserving the label docstring
        def run(x, sub_lcp):
            return solver(x, sub_lcp, **fixed)
        run.__doc__ = solver.__doc__
        return run

    # other accelerated variants (nlnscg, anderson, cg/cr, ...) were
    # experimented with and are left disabled; only the active solvers
    # are benchmarked
    return [
        pgs,
        wrap(accel.bokhoven),
        wrap(accel.bokhoven_gs),
        wrap(accel.bokhoven_chol),
    ]
# parse the command line once; everything below runs at import time
args = parse_args()
# solve params
iterations = args.iter
precision = args.eps
# plot setup
cols = min(3, len(args.filenames) )
rows = int(math.ceil(len(args.filenames) / float(cols)))
# NOTE(review): cols/rows describe a subplot grid, but a single axes is
# created below (the grid call is commented out) -- confirm before removing
import matplotlib
from matplotlib import pyplot as plt
# apply the --fontsize option to every text element of the figures
for param in [ 'axes.titlesize',
               'axes.labelsize',
               'xtick.labelsize',
               'ytick.labelsize',
               'legend.fontsize' ]:
    matplotlib.rcParams[param] = args.fontsize
# _, plots = plt.subplots(rows, cols)
_, plots = plt.subplots()
def bench(filename, **kwargs):
(M, q) = tool.load_lcp( filename )
print 'symmetry check:', np.linalg.norm(M - M.transpose())
if args.ms:
try:
ext = '.ms'
split = f.split('.')
if split[-1] == 'gz':
f = '.'.join(split[:-1])
ext += '.gz'
kwargs['ms'] = tool.load_vec( f + ext )
except:
pass
# error metric
error = metric.lcp_error( (M, q),
primal = True,
dual = False,
compl = True )
error = metric.minimum_norm( (M, q) ) #, metric = np.diag(M) )
print 'file:', filename
print 'dim:', q.size
print 'metric:', error.__doc__
p = plots # i / cols, i % cols ] if rows > 1 else plots[ i ] if cols > 1 else plots
np.random.seed()
# initial = np.random.rand( q.size )
initial = np.zeros(q.size)
p.cla()
for s in solvers( (M, q), **kwargs ):
name = s.__doc__
print '\t{}...'.format(name),
run = tool.bench( (M, q), s,
iterations = iterations,
precision = precision,
metric = error,
initial = initial)
data = [e for k, e in run]
print '\tit: {} \t eps: {:.2e}'.format(len(data), data[-1])
# plot
p.plot( data, label = name )
p.set_title('{} (n = {})'.format(filename, q.size))
p.set_yscale('log')
if args.legend: p.legend()
plt.draw()
plt.ion()  # interactive mode: the figure stays responsive between benchmarks
for filename in args.filenames:
    bench(filename)
plt.show()
print('press enter to continue...')
raw_input()  # python 2: block so the plot window stays open
if args.interactive:
    import code
    # -i / --interactive: drop into a REPL with the script's state
    code.interact(None, None, locals())
| 25.477612 | 92 | 0.495411 |
import krylov
import splitting
import tool
import numpy as np
import math
import metric
import accel
def parse_args():
"""command line arguments"""
import argparse
parser = argparse.ArgumentParser(description='benchmark LCP solvers')
parser.add_argument('filenames', nargs='+', help='filename for LCP data')
parser.add_argument('-n', '--iter', type=int, default=100,
help='iteration count')
parser.add_argument('--fontsize', type=int, default=8,
help='font size in plot')
parser.add_argument('--legend', type = int, default = 1,
help='enable/disable legend in plots')
parser.add_argument('--ms', type = int, default = 0,
help='use mass-splitting when available')
parser.add_argument('--eps', type=float, default=1e-8,
help='precision')
parser.add_argument('-i', '--interactive', action = 'store_true',
help='drop to a python shell once finished')
return parser.parse_args()
def pgs(x, (M, q), **kwargs):
"""pgs"""
return splitting.pgs(x, M, -q, **kwargs)
def pjacobi(x, (M, q), **kwargs):
"""pjacobi"""
return splitting.pjacobi(x, M, -q, **kwargs)
def solvers( (M, q), **kwargs ):
"""solver list"""
n = q.size
ms = kwargs.get('ms', None)
s = pgs
s = pjacobi
def mass_splitting(x, (M, q), **kwargs):
"""mass-splitting"""
return s(x, (M, q), diag = ms, omega = kwargs.get('omega', 1.0))
mass_splitting.__doc__ += ' ({})'.format(s.__doc__)
base = s if ms is None else mass_splitting
n = q.size
opts = { 'diag': ms, 'omega': kwargs.get('omega', 1.0)} if ms is not None else {
'diag': np.diag(M), 'omega': 2.0 / n
}
def wrap(s, **kwargs):
def res(x, lcp):
return s(x, lcp, **kwargs)
res.__doc__ = s.__doc__
return res
return [
pgs,
wrap(accel.bokhoven),
wrap(accel.bokhoven_gs),
wrap(accel.bokhoven_chol),
]
args = parse_args()
iterations = args.iter
precision = args.eps
cols = min(3, len(args.filenames) )
rows = int(math.ceil(len(args.filenames) / float(cols)))
import matplotlib
from matplotlib import pyplot as plt
for param in [ 'axes.titlesize',
'axes.labelsize',
'xtick.labelsize',
'ytick.labelsize',
'legend.fontsize' ]:
matplotlib.rcParams[param] = args.fontsize
_, plots = plt.subplots()
def bench(filename, **kwargs):
(M, q) = tool.load_lcp( filename )
print 'symmetry check:', np.linalg.norm(M - M.transpose())
if args.ms:
try:
ext = '.ms'
split = f.split('.')
if split[-1] == 'gz':
f = '.'.join(split[:-1])
ext += '.gz'
kwargs['ms'] = tool.load_vec( f + ext )
except:
pass
error = metric.lcp_error( (M, q),
primal = True,
dual = False,
compl = True )
error = metric.minimum_norm( (M, q) )
print 'file:', filename
print 'dim:', q.size
print 'metric:', error.__doc__
p = plots
np.random.seed()
initial = np.zeros(q.size)
p.cla()
for s in solvers( (M, q), **kwargs ):
name = s.__doc__
print '\t{}...'.format(name),
run = tool.bench( (M, q), s,
iterations = iterations,
precision = precision,
metric = error,
initial = initial)
data = [e for k, e in run]
print '\tit: {} \t eps: {:.2e}'.format(len(data), data[-1])
p.plot( data, label = name )
p.set_title('{} (n = {})'.format(filename, q.size))
p.set_yscale('log')
if args.legend: p.legend()
plt.draw()
plt.ion()
for filename in args.filenames:
bench(filename)
plt.show()
print('press enter to continue...')
raw_input()
if args.interactive:
import code
code.interact(None, None, locals())
| false | true |
f71abc41fa2bd110c77062474f73a192caded073 | 2,015 | py | Python | tools/perf/page_sets/intl_ja_zh.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2019-11-28T10:46:52.000Z | 2019-11-28T10:46:52.000Z | tools/perf/page_sets/intl_ja_zh.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/page_sets/intl_ja_zh.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2015-03-27T11:15:39.000Z | 2016-08-17T14:19:56.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class IntlJaZhPage(page_module.Page):
  """A single Japanese/Chinese page, exercised by scrolling it."""

  def __init__(self, url, page_set):
    super(IntlJaZhPage, self).__init__(url=url, page_set=page_set)
    self.archive_data_file = 'data/intl_ja_zh.json'
    self.user_agent_type = 'desktop'

  def RunPageInteractions(self, action_runner):
    # record the whole page scroll as one smooth gesture interaction
    scroll = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    action_runner.ScrollPage()
    scroll.End()
class IntlJaZhPageSet(page_set_module.PageSet):
  """ Popular pages in Japanese and Chinese. """

  def __init__(self):
    super(IntlJaZhPageSet, self).__init__(
      user_agent_type='desktop',
      archive_data_file='data/intl_ja_zh.json',
      bucket=page_set_module.PARTNER_BUCKET)

    # Top-ranked Japanese sites, then top-ranked Chinese sites.
    urls = (
      'http://www.amazon.co.jp',    # #5 Japanese site
      'http://mixi.jp/',
      'http://dtiblog.com/',
      'http://2ch.net/',
      'http://jugem.jp/',
      'http://hatena.ne.jp/',
      'http://goo.ne.jp/',
      'http://www.yahoo.co.jp/',    # #1 Japanese site
      'http://fc2.com/ja/',         # #3 Japanese site
      'http://kakaku.com/',
      'http://zol.com.cn/',
      'http://cn.yahoo.com/',
      'http://www.baidu.com/s?wd=%D0%C2%20%CE%C5',    # #1 Chinese site
      'http://www.qq.com/',                           # #2 Chinese site
      'http://www.taobao.com/index_global.php',       # #3 Chinese site
      'http://www.sina.com.cn/',                      # #4 Chinese site
      # #5 Chinese site
      # pylint: disable=C0301
      'http://www.google.com.hk/#q=%E9%82%84%E6%8F%90%E4%BE%9B&fp=c44d333e710cb480',
      'http://udn.com/NEWS/mainpage.shtml',
      'http://ruten.com.tw/',
    )
    for url in urls:
      self.AddUserStory(IntlJaZhPage(url, self))
| 31 | 84 | 0.63871 |
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class IntlJaZhPage(page_module.Page):
def __init__(self, url, page_set):
super(IntlJaZhPage, self).__init__(url=url, page_set=page_set)
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/intl_ja_zh.json'
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
class IntlJaZhPageSet(page_set_module.PageSet):
def __init__(self):
super(IntlJaZhPageSet, self).__init__(
user_agent_type='desktop',
archive_data_file='data/intl_ja_zh.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
ww.amazon.co.jp',
'http://mixi.jp/',
'http://dtiblog.com/',
'http://2ch.net/',
'http://jugem.jp/',
'http://hatena.ne.jp/',
'http://goo.ne.jp/',
ww.yahoo.co.jp/',
c2.com/ja/',
'http://kakaku.com/',
'http://zol.com.cn/',
'http://cn.yahoo.com/',
www.baidu.com/s?wd=%D0%C2%20%CE%C5',
www.qq.com/',
www.taobao.com/index_global.php',
www.sina.com.cn/',
http://www.google.com.hk/#q=%E9%82%84%E6%8F%90%E4%BE%9B&fp=c44d333e710cb480',
'http://udn.com/NEWS/mainpage.shtml',
'http://ruten.com.tw/'
]
for url in urls_list:
self.AddUserStory(IntlJaZhPage(url, self))
| true | true |
f71abc4360294fb27af4d518e22ffc96882ac8b4 | 1,646 | py | Python | src/util/losses.py | anglixjtu/MeshCNN_ | 83826e66d8989ed4967047c2ed6d099568c5781c | [
"MIT"
] | 2 | 2021-08-02T05:39:43.000Z | 2021-08-04T04:15:02.000Z | src/util/losses.py | anglixjtu/MeshCNN_ | 83826e66d8989ed4967047c2ed6d099568c5781c | [
"MIT"
] | null | null | null | src/util/losses.py | anglixjtu/MeshCNN_ | 83826e66d8989ed4967047c2ed6d099568c5781c | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class ChamferLoss(nn.Module):
    """Chamfer distance between two batched point clouds.

    forward() returns, depending on the flags, the backward term
    (gts -> preds, the default), the forward term, or their sum.
    """

    def __init__(self):
        super(ChamferLoss, self).__init__()
        # decided once at construction; selects the LongTensor type below
        self.use_cuda = torch.cuda.is_available()

    def forward(self, preds, gts, reverse=True, bidirectional=True):
        def one_sided(a, b):
            # sum over both point sets of nearest-neighbour squared
            # distances between a and b
            dist = self.batch_pairwise_dist(b, a)
            return (torch.sum(torch.min(dist, 1)[0]) +
                    torch.sum(torch.min(dist, 2)[0]))

        if not (bidirectional or reverse):
            return one_sided(preds, gts)
        backward = one_sided(gts, preds)
        if reverse:
            return backward
        return one_sided(preds, gts) + backward

    def batch_pairwise_dist(self, x, y):
        """Squared euclidean distances ||x_i - y_j||^2, shape (bs, nx, ny)."""
        _, nx, _ = x.size()
        _, ny, _ = y.size()
        # ||x-y||^2 = <x,x> + <y,y> - 2<x,y>, all via batched matmuls
        xx = torch.bmm(x, x.transpose(2, 1))
        yy = torch.bmm(y, y.transpose(2, 1))
        zz = torch.bmm(x, y.transpose(2, 1))
        index_type = torch.cuda.LongTensor if self.use_cuda else torch.LongTensor
        ix = torch.arange(0, nx).type(index_type)
        iy = torch.arange(0, ny).type(index_type)
        rx = xx[:, ix, ix].unsqueeze(1).expand_as(zz.transpose(2, 1))
        ry = yy[:, iy, iy].unsqueeze(1).expand_as(zz)
        return rx.transpose(2, 1) + ry - 2 * zz
import torch.nn as nn
class ChamferLoss(nn.Module):
def __init__(self):
super(ChamferLoss, self).__init__()
self.use_cuda = torch.cuda.is_available()
def forward(self, preds, gts, reverse=True, bidirectional=True):
def compute_loss(preds, gts):
P = self.batch_pairwise_dist(gts, preds)
mins, _ = torch.min(P, 1)
loss_1 = torch.sum(mins)
mins, _ = torch.min(P, 2)
loss_2 = torch.sum(mins)
return loss_1 + loss_2
if bidirectional or reverse:
backward_loss = compute_loss(gts, preds)
if reverse:
return backward_loss
else:
forward_loss = compute_loss(preds, gts)
return forward_loss + backward_loss
else:
forward_loss = compute_loss(preds, gts)
return forward_loss
def batch_pairwise_dist(self, x, y):
bs, num_points_x, points_dim = x.size()
_, num_points_y, _ = y.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
if self.use_cuda:
dtype = torch.cuda.LongTensor
else:
dtype = torch.LongTensor
diag_ind_x = torch.arange(0, num_points_x).type(dtype)
diag_ind_y = torch.arange(0, num_points_y).type(dtype)
rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(
zz.transpose(2, 1))
ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)
P = rx.transpose(2, 1) + ry - 2 * zz
return P | true | true |
f71abc9fb39ef5fd0daeb69a86632bd9e5ed8709 | 5,028 | py | Python | pytorch_toolkit/nncf/examples/object_detection/layers/modules/multibox_loss.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 3 | 2020-12-29T02:47:32.000Z | 2021-11-12T08:12:51.000Z | pytorch_toolkit/nncf/examples/object_detection/layers/modules/multibox_loss.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 23 | 2020-09-25T22:41:48.000Z | 2021-12-13T20:43:37.000Z | pytorch_toolkit/nncf/examples/object_detection/layers/modules/multibox_loss.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 1 | 2021-03-12T10:08:44.000Z | 2021-03-12T10:08:44.000Z | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..box_utils import match, log_sum_exp
class MultiBoxLoss(nn.Module):
    """SSD Weighted Loss Function
    Compute Targets:
        1) Produce Confidence Target Indices by matching ground truth boxes
           with (default) 'priorboxes' that have jaccard index > threshold parameter
           (default threshold: 0.5).
        2) Produce localization target by 'encoding' variance into offsets of ground
           truth boxes and their matched 'priorboxes'.
        3) Hard negative mining to filter the excessive number of negative examples
           that comes with using a large number of default bounding boxes.
           (default negative:positive ratio 3:1)
    Objective Loss:
        L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
        weighted by α which is set to 1 by cross val.
        Args:
            c: class confidences,
            l: predicted boxes,
            g: ground truth boxes
            N: number of matched default boxes
        See: https://arxiv.org/pdf/1512.02325.pdf for more details.
    """

    def __init__(self, cfg, num_classes, overlap_thresh, prior_for_matching,
                 bkg_label, neg_mining, neg_pos, neg_overlap, encode_target, device=None):
        # NOTE(review): 'cfg' is accepted for interface compatibility but is
        # not stored or used anywhere in this class -- confirm before removing
        super(MultiBoxLoss, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.threshold = overlap_thresh          # min jaccard overlap for a match
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.do_neg_mining = neg_mining
        self.negpos_ratio = neg_pos              # negatives kept per positive
        self.neg_overlap = neg_overlap

    def forward(self, predictions, targets):
        """Multibox Loss
        Args:
            predictions (tuple): A tuple containing loc preds, conf preds,
            and prior boxes from SSD net.
                conf shape: torch.size(batch_size,num_priors,num_classes)
                loc shape: torch.size(batch_size,num_priors,4)
                priors shape: torch.size(num_priors,4)
            ground_truth (tensor): Ground truth boxes and labels for a batch,
            shape: [batch_size,num_objs,5] (last idx is the label).
        """
        loc_data, conf_data, priors = predictions
        batch = loc_data.size(0)
        num_priors = loc_data.size(1)
        # match priors (default boxes) and ground truth boxes;
        # match() fills loc_t / conf_t in place for each batch element
        loc_t = torch.Tensor(batch, num_priors, 4).to(self.device)
        conf_t = torch.LongTensor(batch, num_priors).to(self.device)
        for idx in range(batch):
            truths = targets[idx][:, :-1].data
            labels = targets[idx][:, -1].data
            defaults = priors.data
            match(self.threshold, truths, defaults[0], labels, loc_t, conf_t, idx)
        # positives: priors matched to a non-background class
        pos = conf_t > 0
        num_pos = pos.sum(dim=1, keepdim=True)
        # Localization Loss (Smooth L1)
        # Shape: [batch,num_priors,4]
        pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
        loc_p = loc_data[pos_idx].view(-1, 4)
        loc_t = loc_t[pos_idx].view(-1, 4)
        loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
        # Compute max conf across batch for hard negative mining
        batch_conf = conf_data.view(-1, self.num_classes)
        loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
        # Hard Negative Mining
        loss_c = loss_c.view(batch, -1)
        loss_c[pos] = 0  # filter out pos boxes for now
        # double argsort: idx_rank[b, j] is the rank of prior j when all
        # priors of image b are sorted by descending confidence loss
        _, loss_idx = loss_c.sort(1, descending=True)
        _, idx_rank = loss_idx.sort(1)
        num_pos = pos.long().sum(1, keepdim=True)
        # keep at most negpos_ratio negatives per positive (capped below
        # the number of priors), picking the highest-loss ones
        num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)
        neg = idx_rank < num_neg.expand_as(idx_rank)
        # Confidence Loss Including Positive and Negative Examples
        pos_idx = pos.unsqueeze(2).expand_as(conf_data)
        neg_idx = neg.unsqueeze(2).expand_as(conf_data)
        conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
        targets_weighted = conf_t[(pos + neg).gt(0)]
        loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
        # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        N = num_pos.data.sum().to(torch.float)
        loss_l /= N
        loss_c /= N
        return loss_l, loss_c
| 42.610169 | 90 | 0.646181 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..box_utils import match, log_sum_exp
class MultiBoxLoss(nn.Module):
def __init__(self, cfg, num_classes, overlap_thresh, prior_for_matching,
bkg_label, neg_mining, neg_pos, neg_overlap, encode_target, device=None):
super(MultiBoxLoss, self).__init__()
self.device = device
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
def forward(self, predictions, targets):
loc_data, conf_data, priors = predictions
batch = loc_data.size(0)
num_priors = loc_data.size(1)
loc_t = torch.Tensor(batch, num_priors, 4).to(self.device)
conf_t = torch.LongTensor(batch, num_priors).to(self.device)
for idx in range(batch):
truths = targets[idx][:, :-1].data
labels = targets[idx][:, -1].data
defaults = priors.data
match(self.threshold, truths, defaults[0], labels, loc_t, conf_t, idx)
pos = conf_t > 0
num_pos = pos.sum(dim=1, keepdim=True)
pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
loc_p = loc_data[pos_idx].view(-1, 4)
loc_t = loc_t[pos_idx].view(-1, 4)
loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
batch_conf = conf_data.view(-1, self.num_classes)
loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
loss_c = loss_c.view(batch, -1)
loss_c[pos] = 0
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)
neg = idx_rank < num_neg.expand_as(idx_rank)
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
targets_weighted = conf_t[(pos + neg).gt(0)]
loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
N = num_pos.data.sum().to(torch.float)
loss_l /= N
loss_c /= N
return loss_l, loss_c
| true | true |
f71abcf577a388a4d364848c076ea89cef2f516f | 6,184 | py | Python | code/shared/pca/pca.py | amarallab/waldo | e38d23d9474a0bcb7a94e685545edb0115b12af4 | [
"MIT"
] | null | null | null | code/shared/pca/pca.py | amarallab/waldo | e38d23d9474a0bcb7a94e685545edb0115b12af4 | [
"MIT"
] | null | null | null | code/shared/pca/pca.py | amarallab/waldo | e38d23d9474a0bcb7a94e685545edb0115b12af4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
""" a small class for Principal Component Analysis
Usage:
p = PCA( A, fraction=0.90 )
In:
A: an array of e.g. 1000 observations x 20 variables, 1000 rows x 20 columns
fraction: use principal components that account for e.g.
90 % of the total variance
Out:
p.U, p.d, p.Vt: from numpy.linalg.svd, A = U . d . Vt
p.dinv: 1/d or 0, see NR
p.eigen: the eigenvalues of A*A, in decreasing order (p.d**2).
eigen[j] / eigen.sum() is variable j's fraction of the total variance;
look at the first few eigen[] to see how many PCs get to 90 %, 95 % ...
p.npc: number of principal components,
e.g. 2 if the top 2 eigenvalues are >= `fraction` of the total.
It's ok to change this; methods use the current value.
Methods:
The methods of class PCA transform vectors or arrays of e.g.
20 variables, 2 principal components and 1000 observations,
using partial matrices U' d' Vt', parts of the full U d Vt:
A ~ U' . d' . Vt' where e.g.
U' is 1000 x 2
d' is diag([ d0, d1 ]), the 2 largest singular values
Vt' is 2 x 20. Dropping the primes,
d . Vt 2 principal vars = p.vars_pc( 20 vars )
U 1000 obs = p.pc_obs( 2 principal vars )
U . d . Vt 1000 obs, p.obs( 20 vars ) = pc_obs( vars_pc( vars ))
fast approximate A . vars, using the `npc` principal components
Ut 2 pcs = p.obs_pc( 1000 obs )
V . dinv 20 vars = p.pc_vars( 2 principal vars )
V . dinv . Ut 20 vars, p.vars( 1000 obs ) = pc_vars( obs_pc( obs )),
fast approximate Ainverse . obs: vars that give ~ those obs.
Notes:
PCA does not center or scale A; you usually want to first
A -= A.mean(A, axis=0)
A /= A.std(A, axis=0)
with the little class Center or the like, below.
See also:
http://en.wikipedia.org/wiki/Principal_component_analysis
http://en.wikipedia.org/wiki/Singular_value_decomposition
Press et al., Numerical Recipes (2 or 3 ed), SVD
PCA micro-tutorial
iris-pca .py .png
"""
from __future__ import division
import numpy as np
dot = np.dot  # module-wide alias; the commented lines below swap in a parallel dot
# import bz.numpyutil as nu
# dot = nu.pdot
__version__ = "2010-04-14 apr"
__author_email__ = "denis-bz-py at t-online dot de"
#...............................................................................
class PCA:
    """Principal Component Analysis via a thin SVD of A (A is used as
    given: center / scale it beforehand, e.g. with Center).

    After construction: U, d, Vt with A = U . diag(d) . Vt, the
    eigenvalues d**2, their cumulative variance fractions, npc (number
    of components reaching `fraction` of the variance) and dinv (1/d,
    or 0 for negligible singular values).
    """

    def __init__(self, A, fraction=0.90):
        assert 0 <= fraction <= 1
        # thin SVD: A = U . diag(d) . Vt
        self.U, self.d, self.Vt = np.linalg.svd(A, full_matrices=False)
        assert np.all(self.d[:-1] >= self.d[1:])  # singular values come sorted
        self.eigen = self.d ** 2
        cum = np.cumsum(self.eigen)
        self.sumvariance = cum / cum[-1]
        # smallest number of components whose variance reaches `fraction`
        self.npc = np.searchsorted(self.sumvariance, fraction) + 1
        # pseudo-inverse of d: drop singular values below d[0] * 1e-6
        cutoff = self.d[0] * 1e-6
        self.dinv = np.array([0 if s <= cutoff else 1 / s
                              for s in self.d])

    def pc(self):
        """ e.g. 1000 x 2 U[:, :npc] * d[:npc], to plot etc. """
        n = self.npc
        return self.U[:, :n] * self.d[:n]

    def vars_pc(self, x):
        # vars -> principal components: d' . Vt' . x
        n = self.npc
        return self.d[:n] * np.dot(self.Vt[:n], x.T).T

    def pc_vars(self, p):
        # principal components -> vars: V' . dinv' . p
        n = self.npc
        return np.dot(self.Vt[:n].T, (self.dinv[:n] * p).T).T

    def pc_obs(self, p):
        # principal components -> observations: U' . p
        n = self.npc
        return np.dot(self.U[:, :n], p.T)

    def obs_pc(self, obs):
        # observations -> principal components: Ut' . obs
        n = self.npc
        return np.dot(self.U[:, :n].T, obs).T

    def obs(self, x):
        # fast approximate A . x using npc components
        return self.pc_obs(self.vars_pc(x))

    def vars(self, obs):
        # fast approximate Ainverse . obs using npc components
        return self.pc_vars(self.obs_pc(obs))
class Center:
""" A -= A.mean() /= A.std(), inplace -- use A.copy() if need be
uncenter(x) == original A . x
"""
# mttiw
def __init__( self, A, axis=0, scale=True, verbose=1 ):
self.mean = A.mean(axis=axis)
if verbose:
print "Center -= A.mean:", self.mean
A -= self.mean
if scale:
std = A.std(axis=axis)
self.std = np.where( std, std, 1. )
if verbose:
print "Center /= A.std:", self.std
A /= self.std
else:
self.std = np.ones( A.shape[-1] )
self.A = A
def uncenter( self, x ):
return np.dot( self.A, x * self.std ) + np.dot( x, self.mean )
#...............................................................................
if __name__ == "__main__":
import sys
csv = "iris4.csv" # wikipedia Iris_flower_data_set
# 5.1,3.5,1.4,0.2 # ,Iris-setosa ...
N = 1000
K = 20
fraction = .90
seed = 1
exec "\n".join( sys.argv[1:] ) # N= ...
np.random.seed(seed)
np.set_printoptions(1, threshold=100, suppress=True ) # .1f
try:
A = np.genfromtxt( csv, delimiter="," )
N, K = A.shape
except IOError:
A = np.random.normal( size=(N, K) ) # gen correlated ?
print "csv: %s N: %d K: %d fraction: %.2g" % (csv, N, K, fraction)
Center(A)
print "A:", A
print "PCA ..." ,
p = PCA( A, fraction=fraction )
print "npc:", p.npc
print "% variance:", p.sumvariance * 100
print "Vt[0], weights that give PC 0:", p.Vt[0]
print "A . Vt[0]:", dot( A, p.Vt[0] )
print "pc:", p.pc()
print "\nobs <-> pc <-> x: with fraction=1, diffs should be ~ 0"
x = np.ones(K)
# x = np.ones(( 3, K ))
print "x:", x
pc = p.vars_pc(x) # d' Vt' x
print "vars_pc(x):", pc
print "back to ~ x:", p.pc_vars(pc)
Ax = dot( A, x.T )
pcx = p.obs(x) # U' d' Vt' x
print "Ax:", Ax
print "A'x:", pcx
print "max |Ax - A'x|: %.2g" % np.linalg.norm( Ax - pcx, np.inf )
b = Ax # ~ back to original x, Ainv A x
back = p.vars(b)
print "~ back again:", back
print "max |back - x|: %.2g" % np.linalg.norm( back - x, np.inf )
# end pca.py | 33.608696 | 85 | 0.537516 |
""" a small class for Principal Component Analysis
Usage:
p = PCA( A, fraction=0.90 )
In:
A: an array of e.g. 1000 observations x 20 variables, 1000 rows x 20 columns
fraction: use principal components that account for e.g.
90 % of the total variance
Out:
p.U, p.d, p.Vt: from numpy.linalg.svd, A = U . d . Vt
p.dinv: 1/d or 0, see NR
p.eigen: the eigenvalues of A*A, in decreasing order (p.d**2).
eigen[j] / eigen.sum() is variable j's fraction of the total variance;
look at the first few eigen[] to see how many PCs get to 90 %, 95 % ...
p.npc: number of principal components,
e.g. 2 if the top 2 eigenvalues are >= `fraction` of the total.
It's ok to change this; methods use the current value.
Methods:
The methods of class PCA transform vectors or arrays of e.g.
20 variables, 2 principal components and 1000 observations,
using partial matrices U' d' Vt', parts of the full U d Vt:
A ~ U' . d' . Vt' where e.g.
U' is 1000 x 2
d' is diag([ d0, d1 ]), the 2 largest singular values
Vt' is 2 x 20. Dropping the primes,
d . Vt 2 principal vars = p.vars_pc( 20 vars )
U 1000 obs = p.pc_obs( 2 principal vars )
U . d . Vt 1000 obs, p.obs( 20 vars ) = pc_obs( vars_pc( vars ))
fast approximate A . vars, using the `npc` principal components
Ut 2 pcs = p.obs_pc( 1000 obs )
V . dinv 20 vars = p.pc_vars( 2 principal vars )
V . dinv . Ut 20 vars, p.vars( 1000 obs ) = pc_vars( obs_pc( obs )),
fast approximate Ainverse . obs: vars that give ~ those obs.
Notes:
PCA does not center or scale A; you usually want to first
A -= A.mean(A, axis=0)
A /= A.std(A, axis=0)
with the little class Center or the like, below.
See also:
http://en.wikipedia.org/wiki/Principal_component_analysis
http://en.wikipedia.org/wiki/Singular_value_decomposition
Press et al., Numerical Recipes (2 or 3 ed), SVD
PCA micro-tutorial
iris-pca .py .png
"""
from __future__ import division
import numpy as np
dot = np.dot
# import bz.numpyutil as nu
# dot = nu.pdot
__version__ = "2010-04-14 apr"
__author_email__ = "denis-bz-py at t-online dot de"
#...............................................................................
class PCA:
def __init__( self, A, fraction=0.90 ):
assert 0 <= fraction <= 1
# A = U . diag(d) . Vt, O( m n^2 ), lapack_lite --
self.U, self.d, self.Vt = np.linalg.svd( A, full_matrices=False )
assert np.all( self.d[:-1] >= self.d[1:] ) # sorted
self.eigen = self.d**2
self.sumvariance = np.cumsum(self.eigen)
self.sumvariance /= self.sumvariance[-1]
self.npc = np.searchsorted( self.sumvariance, fraction ) + 1
self.dinv = np.array([ 1/d if d > self.d[0] * 1e-6 else 0
for d in self.d ])
def pc( self ):
""" e.g. 1000 x 2 U[:, :npc] * d[:npc], to plot etc. """
n = self.npc
return self.U[:, :n] * self.d[:n]
# These 1-line methods may not be worth the bother;
# then use U d Vt directly --
def vars_pc( self, x ):
n = self.npc
return self.d[:n] * dot( self.Vt[:n], x.T ).T # 20 vars -> 2 principal
def pc_vars( self, p ):
n = self.npc
return dot( self.Vt[:n].T, (self.dinv[:n] * p).T ) .T # 2 PC -> 20 vars
def pc_obs( self, p ):
n = self.npc
return dot( self.U[:, :n], p.T ) # 2 principal -> 1000 obs
def obs_pc( self, obs ):
n = self.npc
return dot( self.U[:, :n].T, obs ) .T # 1000 obs -> 2 principal
def obs( self, x ):
return self.pc_obs( self.vars_pc(x) ) # 20 vars -> 2 principal -> 1000 obs
def vars( self, obs ):
return self.pc_vars( self.obs_pc(obs) ) # 1000 obs -> 2 principal -> 20 vars
class Center:
""" A -= A.mean() /= A.std(), inplace -- use A.copy() if need be
uncenter(x) == original A . x
"""
# mttiw
def __init__( self, A, axis=0, scale=True, verbose=1 ):
self.mean = A.mean(axis=axis)
if verbose:
print "Center -= A.mean:", self.mean
A -= self.mean
if scale:
std = A.std(axis=axis)
self.std = np.where( std, std, 1. )
if verbose:
print "Center /= A.std:", self.std
A /= self.std
else:
self.std = np.ones( A.shape[-1] )
self.A = A
def uncenter( self, x ):
return np.dot( self.A, x * self.std ) + np.dot( x, self.mean )
#...............................................................................
if __name__ == "__main__":
import sys
csv = "iris4.csv" # wikipedia Iris_flower_data_set
# 5.1,3.5,1.4,0.2 # ,Iris-setosa ...
N = 1000
K = 20
fraction = .90
seed = 1
exec "\n".join( sys.argv[1:] ) # N= ...
np.random.seed(seed)
np.set_printoptions(1, threshold=100, suppress=True ) # .1f
try:
A = np.genfromtxt( csv, delimiter="," )
N, K = A.shape
except IOError:
A = np.random.normal( size=(N, K) ) # gen correlated ?
print "csv: %s N: %d K: %d fraction: %.2g" % (csv, N, K, fraction)
Center(A)
print "A:", A
print "PCA ..." ,
p = PCA( A, fraction=fraction )
print "npc:", p.npc
print "% variance:", p.sumvariance * 100
print "Vt[0], weights that give PC 0:", p.Vt[0]
print "A . Vt[0]:", dot( A, p.Vt[0] )
print "pc:", p.pc()
print "\nobs <-> pc <-> x: with fraction=1, diffs should be ~ 0"
x = np.ones(K)
# x = np.ones(( 3, K ))
print "x:", x
pc = p.vars_pc(x) # d' Vt' x
print "vars_pc(x):", pc
print "back to ~ x:", p.pc_vars(pc)
Ax = dot( A, x.T )
pcx = p.obs(x) # U' d' Vt' x
print "Ax:", Ax
print "A'x:", pcx
print "max |Ax - A'x|: %.2g" % np.linalg.norm( Ax - pcx, np.inf )
b = Ax
back = p.vars(b)
print "~ back again:", back
print "max |back - x|: %.2g" % np.linalg.norm( back - x, np.inf )
| false | true |
f71abd4fc53838f6ee6c2abce3c48015aa6d6d6c | 1,513 | py | Python | src/gluonts/transform/dataset.py | lfywork/gluon-ts | 399dbad20f6e78685b707a30817b3a2f97925f8a | [
"Apache-2.0"
] | 1 | 2021-08-22T19:42:55.000Z | 2021-08-22T19:42:55.000Z | src/gluonts/transform/dataset.py | lfywork/gluon-ts | 399dbad20f6e78685b707a30817b3a2f97925f8a | [
"Apache-2.0"
] | null | null | null | src/gluonts/transform/dataset.py | lfywork/gluon-ts | 399dbad20f6e78685b707a30817b3a2f97925f8a | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Iterator, List
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.transform import Chain, Transformation
class TransformedDataset(Dataset):
"""
A dataset that corresponds to applying a list of transformations to each
element in the base_dataset.
This only supports SimpleTransformations, which do the same thing at
prediction and training time.
Parameters
----------
base_dataset
Dataset to transform
transformations
List of transformations to apply
"""
def __init__(
self, base_dataset: Dataset, transformations: List[Transformation]
) -> None:
self.base_dataset = base_dataset
self.transformations = Chain(transformations)
def __iter__(self) -> Iterator[DataEntry]:
yield from self.transformations(self.base_dataset, is_train=True)
def __len__(self):
return sum(1 for _ in self)
| 31.520833 | 76 | 0.723067 |
from typing import Iterator, List
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.transform import Chain, Transformation
class TransformedDataset(Dataset):
def __init__(
self, base_dataset: Dataset, transformations: List[Transformation]
) -> None:
self.base_dataset = base_dataset
self.transformations = Chain(transformations)
def __iter__(self) -> Iterator[DataEntry]:
yield from self.transformations(self.base_dataset, is_train=True)
def __len__(self):
return sum(1 for _ in self)
| true | true |
f71abea7e87d7a468b3566906416d9861f1ed252 | 2,585 | py | Python | db.py | tunir27/Attendr-Hardware-Scripts | cdc9293157d1810c2a9c8af0318b04203a8b2bf5 | [
"Apache-2.0"
] | 1 | 2018-08-15T06:27:53.000Z | 2018-08-15T06:27:53.000Z | db.py | tunir27/Attendr-Hardware-Scripts | cdc9293157d1810c2a9c8af0318b04203a8b2bf5 | [
"Apache-2.0"
] | null | null | null | db.py | tunir27/Attendr-Hardware-Scripts | cdc9293157d1810c2a9c8af0318b04203a8b2bf5 | [
"Apache-2.0"
] | null | null | null | import sqlite3
import datetime
import time
#import Read1
#import sync
#from datetime import datetime
conn = sqlite3.connect('att.db')
c = conn.cursor()
def db(sid):
#conn = sqlite3.connect('att.db')
#c = conn.cursor()
start_time = time.time()
c.execute('''CREATE TABLE IF NOT EXISTS attendance(ID integer PRIMARY KEY,std_id varchar2,entry_date varchar2,entry_time varchar2,leave_time varchar2,duration varchar2,status varchar2)''')
#print("Enter the values to be inserted")
#print("Student ID")
std_id=sid
t = (std_id,)
c.execute('SELECT * FROM attendance where std_id=?',t)
d=c.fetchone()
#print(d)
if d:
#c.execute('SELECT entry_time FROM attendance where std_id=?',t)
datetime_object = datetime.datetime.strptime(d[3],'%H:%M:%S')
dtime=datetime_object.strftime("%H:%M:%S")
FMT = "%H:%M:%S"
now = datetime.datetime.now()
ntime=now.strftime("%H:%M:%S")
date = datetime.datetime.strptime(str(ntime), FMT) - datetime.datetime.strptime(str(dtime), FMT)
tdelta = datetime.datetime.strptime(str(date),"%H:%M:%S")
#h,m,s=tdelta.split(':')
rtime=int(tdelta.hour)*60+int(tdelta.minute)+(int(tdelta.second)/60)
#print(rtime)
#chtime=datetime.datetime.now()-datetime.timedelta(minutes=30)
if rtime>1:
exit_att(std_id,d[3])
#entry_att(std_id)
#print("Data Inserted")
else:
entry_att(std_id)
#print("Data Inserted")
#c.execute('''drop table attendance''')
#entry_att(std_id)
#printr()
#sync()
#conn.close()
#print(time.time()-start_time)
def entry_att(std_id):
now = datetime.datetime.now()
date=now.strftime("%y/%m/%d")
time=now.strftime("%H:%M:%S")
c.execute('''INSERT INTO attendance(std_id,entry_date,entry_time,status) values(?,?,?,?)''',(std_id,date,time,'0'))
conn.commit()
def exit_att(std_id,ptime):
now = datetime.datetime.now()
#date=now.strftime("%Y-%m-%d")
ltime=now.strftime("%H:%M:%S")
FMT = '%H:%M:%S'
duration = datetime.datetime.strptime(str(ltime), FMT) - datetime.datetime.strptime(str(ptime), FMT)
utime=datetime.datetime.strptime(str(duration),"%H:%M:%S")
dtime=utime.strftime("%H:%M:%S")
#print(duration,dtime)
#print(type(duration))
#print(type(dtime))
c.execute('''UPDATE attendance SET leave_time=?,duration=?,status=? where std_id=?''',(ltime,dtime,'0',std_id))
conn.commit()
def printr():
c.execute('''SELECT * FROM attendance''')
print(c.fetchall())
| 33.141026 | 192 | 0.630174 | import sqlite3
import datetime
import time
conn = sqlite3.connect('att.db')
c = conn.cursor()
def db(sid):
start_time = time.time()
c.execute('''CREATE TABLE IF NOT EXISTS attendance(ID integer PRIMARY KEY,std_id varchar2,entry_date varchar2,entry_time varchar2,leave_time varchar2,duration varchar2,status varchar2)''')
std_id=sid
t = (std_id,)
c.execute('SELECT * FROM attendance where std_id=?',t)
d=c.fetchone()
if d:
datetime_object = datetime.datetime.strptime(d[3],'%H:%M:%S')
dtime=datetime_object.strftime("%H:%M:%S")
FMT = "%H:%M:%S"
now = datetime.datetime.now()
ntime=now.strftime("%H:%M:%S")
date = datetime.datetime.strptime(str(ntime), FMT) - datetime.datetime.strptime(str(dtime), FMT)
tdelta = datetime.datetime.strptime(str(date),"%H:%M:%S")
rtime=int(tdelta.hour)*60+int(tdelta.minute)+(int(tdelta.second)/60)
if rtime>1:
exit_att(std_id,d[3])
else:
entry_att(std_id)
def entry_att(std_id):
now = datetime.datetime.now()
date=now.strftime("%y/%m/%d")
time=now.strftime("%H:%M:%S")
c.execute('''INSERT INTO attendance(std_id,entry_date,entry_time,status) values(?,?,?,?)''',(std_id,date,time,'0'))
conn.commit()
def exit_att(std_id,ptime):
now = datetime.datetime.now()
ltime=now.strftime("%H:%M:%S")
FMT = '%H:%M:%S'
duration = datetime.datetime.strptime(str(ltime), FMT) - datetime.datetime.strptime(str(ptime), FMT)
utime=datetime.datetime.strptime(str(duration),"%H:%M:%S")
dtime=utime.strftime("%H:%M:%S")
c.execute('''UPDATE attendance SET leave_time=?,duration=?,status=? where std_id=?''',(ltime,dtime,'0',std_id))
conn.commit()
def printr():
c.execute('''SELECT * FROM attendance''')
print(c.fetchall())
| true | true |
f71abeab574e7cf7dd44a881bb82f87cfbfbc051 | 2,915 | py | Python | __init__.py | LevinJac/viseme-mqtt-skill-mycroft | 5f2feb4336bfff1f2a293daf5f6feb43f7d98988 | [
"Apache-2.0"
] | null | null | null | __init__.py | LevinJac/viseme-mqtt-skill-mycroft | 5f2feb4336bfff1f2a293daf5f6feb43f7d98988 | [
"Apache-2.0"
] | null | null | null | __init__.py | LevinJac/viseme-mqtt-skill-mycroft | 5f2feb4336bfff1f2a293daf5f6feb43f7d98988 | [
"Apache-2.0"
] | null | null | null |
from mycroft import MycroftSkill
from mycroft.messagebus import Message
import json
from .lib import MqttService
class MessageListener(MycroftSkill):
# Initializing the skill
def initialize(self):
self.log.info("Initializing Skill MessageListener")
self.add_event('speak', self.handler_speak)
self.add_event('enclosure.mouth.viseme_list', self.handler_enclosure_mouth_viseme_list)
self.mqttservice = MqttService("VisemeSkill", "mosquitto", self.log.info)
self.prepare_for_webapp_message()
def prepare_for_webapp_message(self):
self.mqttservice.loopStart()
self.mqttservice.subscribe("faceme/webapp", self.message_recieved)
# acquiring speak data (the text mycroft will output):
def handler_speak(self, message):
self.text = message.data.get('utterance')
# acquiring mouth_viseme_list data:
def handler_enclosure_mouth_viseme_list(self, message):
self.startTime = message.data.get('start')
self.visemes = message.data.get('visemes')
# Call method send_visemelist(build_json()) to send our now complete dataset via mqtt in a json string format
self.send_visemelist(self.build_json())
# Function to convert the strings acquired from the messagebus into a json string and return it:
def build_json(self):
data_set = {"text": self.text, "start": self.startTime, "visemes": self.visemes}
json_dump = json.dumps(data_set)
return json_dump
def send_visemelist(self, payload):
self.mqttservice.subscribe("faceme/mycroft/visemes", self.message_recieved) # Printet on_message von MQTT_service
# Publish the payload we created in build_json() Wird richtig übertragen
self.mqttservice.publish("faceme/mycroft/visemes", payload)
def message_recieved(self, message):
self.log.info("Es ist eine Nachricht angekommen: " + str(message.payload) + " topic: " + message.topic)
if message.topic == "faceme/webapp":
self.webapp_message(message)
def webapp_message(self, message):
decoded_message = str(message.payload.decode("utf-8"))
msg = json.loads(decoded_message)
self.bus.emit(Message(msg["type"], msg["data"]))
def shutdown(self):
self.mqttservice.loopStop()
self.mqttservice.disconnect()
def create_skill():
return MessageListener()
###### Unused Function #######
# Function adds the duration each viseme should be displayed to it's array so the data would be: "visemes": [[CODE, END_TIME, DURATION], ...]
#def addDuration(self):
#self.visemes[0].append(self.visemes[0][1]) # Do we need this?
#for x in range(len(self.visemes)):
#if x < (len(self.visemes)-1):
#duration = self.visemes[x+1][1] - self.visemes[x][1]
#self.visemes[x+1].append(duration) | 41.642857 | 145 | 0.67753 |
from mycroft import MycroftSkill
from mycroft.messagebus import Message
import json
from .lib import MqttService
class MessageListener(MycroftSkill):
def initialize(self):
self.log.info("Initializing Skill MessageListener")
self.add_event('speak', self.handler_speak)
self.add_event('enclosure.mouth.viseme_list', self.handler_enclosure_mouth_viseme_list)
self.mqttservice = MqttService("VisemeSkill", "mosquitto", self.log.info)
self.prepare_for_webapp_message()
def prepare_for_webapp_message(self):
self.mqttservice.loopStart()
self.mqttservice.subscribe("faceme/webapp", self.message_recieved)
def handler_speak(self, message):
self.text = message.data.get('utterance')
def handler_enclosure_mouth_viseme_list(self, message):
self.startTime = message.data.get('start')
self.visemes = message.data.get('visemes')
self.send_visemelist(self.build_json())
def build_json(self):
data_set = {"text": self.text, "start": self.startTime, "visemes": self.visemes}
json_dump = json.dumps(data_set)
return json_dump
def send_visemelist(self, payload):
self.mqttservice.subscribe("faceme/mycroft/visemes", self.message_recieved)
self.mqttservice.publish("faceme/mycroft/visemes", payload)
def message_recieved(self, message):
self.log.info("Es ist eine Nachricht angekommen: " + str(message.payload) + " topic: " + message.topic)
if message.topic == "faceme/webapp":
self.webapp_message(message)
def webapp_message(self, message):
decoded_message = str(message.payload.decode("utf-8"))
msg = json.loads(decoded_message)
self.bus.emit(Message(msg["type"], msg["data"]))
def shutdown(self):
self.mqttservice.loopStop()
self.mqttservice.disconnect()
def create_skill():
return MessageListener()
(len(self.visemes)-1):
#duration = self.visemes[x+1][1] - self.visemes[x][1]
#self.visemes[x+1].append(duration) | true | true |
f71abf4ef891fb18baa38ab3843f5a02e2198d3b | 297 | py | Python | src/example_d/trade/get_position.py | Han1018/Cryptocurrency-Automated-Trading | 52a5b6d15eb9b2a3a69cc53bd159f6eec073614d | [
"MIT"
] | 1 | 2020-11-24T20:01:37.000Z | 2020-11-24T20:01:37.000Z | example_d/trade/get_position.py | vanshwassan/binance-python-futures | f5a1664ef1e18bc8a53479fab3fd6d5e512dba07 | [
"MIT"
] | 1 | 2021-07-20T15:25:11.000Z | 2021-07-20T15:25:11.000Z | example_d/trade/get_position.py | vanshwassan/binance-python-futures | f5a1664ef1e18bc8a53479fab3fd6d5e512dba07 | [
"MIT"
] | 1 | 2021-12-14T02:39:04.000Z | 2021-12-14T02:39:04.000Z | from binance_d import RequestClient
from binance_d.constant.test import *
from binance_d.base.printobject import *
from binance_d.model.constant import *
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
result = request_client.get_position()
PrintMix.print_data(result)
| 33 | 74 | 0.841751 | from binance_d import RequestClient
from binance_d.constant.test import *
from binance_d.base.printobject import *
from binance_d.model.constant import *
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
result = request_client.get_position()
PrintMix.print_data(result)
| true | true |
f71abf924989b3e0fac8c1f6862cb9ab2a3fcdff | 266 | py | Python | spectra/__init__.py | jevandezande/spectra | 95cf4aa7599c30183263740c88f94714d55e1d0a | [
"MIT"
] | 16 | 2019-10-03T21:30:45.000Z | 2022-03-09T22:18:44.000Z | spectra/__init__.py | jevandezande/spectra | 95cf4aa7599c30183263740c88f94714d55e1d0a | [
"MIT"
] | 8 | 2021-03-15T20:45:32.000Z | 2022-03-03T15:17:42.000Z | spectra/__init__.py | jevandezande/spectra | 95cf4aa7599c30183263740c88f94714d55e1d0a | [
"MIT"
] | 1 | 2021-07-26T18:50:06.000Z | 2021-07-26T18:50:06.000Z | """Top-level package for spectra."""
from .conv_spectrum import ConvSpectrum
from .sticks_spectrum import SticksSpectrum
__author__ = """Jonathon Vandezande"""
__email__ = "jevandezande@gmail.com"
__version__ = "0.4.0"
__all__ = ["ConvSpectrum", "SticksSpectrum"]
| 26.6 | 44 | 0.763158 | from .conv_spectrum import ConvSpectrum
from .sticks_spectrum import SticksSpectrum
__author__ = """Jonathon Vandezande"""
__email__ = "jevandezande@gmail.com"
__version__ = "0.4.0"
__all__ = ["ConvSpectrum", "SticksSpectrum"]
| true | true |
f71ac12590c5ba69a6a44f3ffa552f4ea88234a3 | 17,149 | py | Python | aslam_offline_calibration/kalibr/python/kalibr_camera_calibration/CameraIntializers.py | CORAL-CMU/kalibr | ebd759286944f156c3ae6202c27fe47667929744 | [
"BSD-4-Clause"
] | null | null | null | aslam_offline_calibration/kalibr/python/kalibr_camera_calibration/CameraIntializers.py | CORAL-CMU/kalibr | ebd759286944f156c3ae6202c27fe47667929744 | [
"BSD-4-Clause"
] | null | null | null | aslam_offline_calibration/kalibr/python/kalibr_camera_calibration/CameraIntializers.py | CORAL-CMU/kalibr | ebd759286944f156c3ae6202c27fe47667929744 | [
"BSD-4-Clause"
] | null | null | null | import sm
import aslam_backend as aopt
import aslam_cv as cv
import numpy as np
def addPoseDesignVariable(problem, T0=sm.Transformation()):
q_Dv = aopt.RotationQuaternionDv( T0.q() )
q_Dv.setActive( True )
problem.addDesignVariable(q_Dv)
t_Dv = aopt.EuclideanPointDv( T0.t() )
t_Dv.setActive( True )
problem.addDesignVariable(t_Dv)
return aopt.TransformationBasicDv( q_Dv.toExpression(), t_Dv.toExpression() )
def stereoCalibrate(camL_geometry, camH_geometry, obslist, distortionActive=False, baseline=None):
#####################################################
## find initial guess as median of all pnp solutions
#####################################################
if baseline is None:
r=[]; t=[]
for obsL, obsH in obslist:
#if we have observations for both camss
if obsL is not None and obsH is not None:
success, T_L = camL_geometry.geometry.estimateTransformation(obsL)
success, T_H = camH_geometry.geometry.estimateTransformation(obsH)
baseline = T_H.inverse()*T_L
t.append(baseline.t())
rv=sm.RotationVector()
r.append(rv.rotationMatrixToParameters( baseline.C() ))
r_median = np.median(np.asmatrix(r), axis=0).flatten().T
R_median = rv.parametersToRotationMatrix(r_median)
t_median = np.median(np.asmatrix(t), axis=0).flatten().T
baseline_HL = sm.Transformation( sm.rt2Transform(R_median, t_median) )
else:
baseline_HL = baseline
#verbose output
if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
dL = camL_geometry.geometry.projection().distortion().getParameters().flatten()
pL = camL_geometry.geometry.projection().getParameters().flatten()
dH = camH_geometry.geometry.projection().distortion().getParameters().flatten()
pH = camH_geometry.geometry.projection().getParameters().flatten()
sm.logDebug("initial guess for stereo calib: {0}".format(baseline_HL.T()))
sm.logDebug("initial guess for intrinsics camL: {0}".format(pL))
sm.logDebug("initial guess for intrinsics camH: {0}".format(pH))
sm.logDebug("initial guess for distortion camL: {0}".format(dL))
sm.logDebug("initial guess for distortion camH: {0}".format(dH))
############################################
## solve the bundle adjustment
############################################
problem = aopt.OptimizationProblem()
#baseline design variable
baseline_dv = addPoseDesignVariable(problem, baseline_HL)
#target pose dv for all target views (=T_camL_w)
target_pose_dvs = list()
for obsL, obsH in obslist:
if obsL is not None: #use camL if we have an obs for this one
success, T_t_cL = camL_geometry.geometry.estimateTransformation(obsL)
else:
success, T_t_cH = camH_geometry.geometry.estimateTransformation(obsH)
T_t_cL = T_t_cH*baseline_HL #apply baseline for the second camera
target_pose_dv = addPoseDesignVariable(problem, T_t_cL)
target_pose_dvs.append(target_pose_dv)
#add camera dvs
camL_geometry.setDvActiveStatus(camL_geometry.projectionActive, distortionActive or camL_geometry.distortionActive, False)
camH_geometry.setDvActiveStatus(camH_geometry.projectionActive, distortionActive or camH_geometry.distortionActive, False)
problem.addDesignVariable(camL_geometry.dv.distortionDesignVariable())
problem.addDesignVariable(camL_geometry.dv.projectionDesignVariable())
problem.addDesignVariable(camL_geometry.dv.shutterDesignVariable())
problem.addDesignVariable(camH_geometry.dv.distortionDesignVariable())
problem.addDesignVariable(camH_geometry.dv.projectionDesignVariable())
problem.addDesignVariable(camH_geometry.dv.shutterDesignVariable())
############################################
## add error terms
############################################
#corner uncertainty
# \todo pass in the detector uncertainty somehow.
cornerUncertainty = 1.0
R = np.eye(2) * cornerUncertainty * cornerUncertainty
invR = np.linalg.inv(R)
#Add reprojection error terms for both cameras
reprojectionErrors0 = []; reprojectionErrors1 = []
for cidx, cam in enumerate([camL_geometry, camH_geometry]):
sm.logDebug("stereoCalibration: adding camera error terms for {0} calibration targets".format(len(obslist)))
#get the image and target points corresponding to the frame
target = cam.ctarget.detector.target()
#add error terms for all observations
for view_id, obstuple in enumerate(obslist):
#add error terms if we have an observation for this cam
obs=obstuple[cidx]
if obs is not None:
T_cam_w = target_pose_dvs[view_id].toExpression().inverse()
#add the baseline for the second camera
if cidx!=0:
T_cam_w = baseline_dv.toExpression() * T_cam_w
for i in range(0, target.size()):
p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));
valid, y = obs.imagePoint(i)
if valid:
# Create an error term.
rerr = cam.model.reprojectionError(y, invR, T_cam_w * p_target, cam.dv)
rerr.idx = i
problem.addErrorTerm(rerr)
if cidx==0:
reprojectionErrors0.append(rerr)
else:
reprojectionErrors1.append(rerr)
sm.logDebug("stereoCalibrate: added {0} camera error terms".format( len(reprojectionErrors0)+len(reprojectionErrors1) ))
############################################
## solve
############################################
options = aopt.Optimizer2Options()
options.verbose = True if sm.getLoggingLevel()==sm.LoggingLevel.Debug else False
options.nThreads = 4
options.convergenceDeltaX = 1e-3
options.convergenceDeltaJ = 1
options.maxIterations = 200
options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)
optimizer = aopt.Optimizer2(options)
optimizer.setProblem(problem)
#verbose output
if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
sm.logDebug("Before optimization:")
e2 = np.array([ e.evaluateError() for e in reprojectionErrors0 ])
sm.logDebug( " Reprojection error squarred (camL): mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
e2 = np.array([ e.evaluateError() for e in reprojectionErrors1 ])
sm.logDebug( " Reprojection error squarred (camH): mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
sm.logDebug("baseline={0}".format(baseline_dv.toTransformationMatrix()))
try:
retval = optimizer.optimize()
if retval.linearSolverFailure:
sm.logError("stereoCalibrate: Optimization failed!")
success = not retval.linearSolverFailure
except:
sm.logError("stereoCalibrate: Optimization failed!")
success = False
if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
sm.logDebug("After optimization:")
e2 = np.array([ e.evaluateError() for e in reprojectionErrors0 ])
sm.logDebug( " Reprojection error squarred (camL): mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
e2 = np.array([ e.evaluateError() for e in reprojectionErrors1 ])
sm.logDebug( " Reprojection error squarred (camH): mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
#verbose output
if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
dL = camL_geometry.geometry.projection().distortion().getParameters().flatten()
pL = camL_geometry.geometry.projection().getParameters().flatten()
dH = camH_geometry.geometry.projection().distortion().getParameters().flatten()
pH = camH_geometry.geometry.projection().getParameters().flatten()
sm.logDebug("guess for intrinsics camL: {0}".format(pL))
sm.logDebug("guess for intrinsics camH: {0}".format(pH))
sm.logDebug("guess for distortion camL: {0}".format(dL))
sm.logDebug("guess for distortion camH: {0}".format(dH))
if success:
baseline_HL = sm.Transformation(baseline_dv.toTransformationMatrix())
return success, baseline_HL
else:
#return the intiial guess if we fail
return success, baseline_HL
def calibrateIntrinsics(cam_geometry, obslist, distortionActive=True, intrinsicsActive=True):
#verbose output
if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
d = cam_geometry.geometry.projection().distortion().getParameters().flatten()
p = cam_geometry.geometry.projection().getParameters().flatten()
sm.logDebug("calibrateIntrinsics: intrinsics guess: {0}".format(p))
sm.logDebug("calibrateIntrinsics: distortion guess: {0}".format(d))
############################################
## solve the bundle adjustment
############################################
problem = aopt.OptimizationProblem()
#add camera dvs
cam_geometry.setDvActiveStatus(intrinsicsActive, distortionActive, False)
problem.addDesignVariable(cam_geometry.dv.distortionDesignVariable())
problem.addDesignVariable(cam_geometry.dv.projectionDesignVariable())
problem.addDesignVariable(cam_geometry.dv.shutterDesignVariable())
#corner uncertainty
cornerUncertainty = 1.0
R = np.eye(2) * cornerUncertainty * cornerUncertainty
invR = np.linalg.inv(R)
#get the image and target points corresponding to the frame
target = cam_geometry.ctarget.detector.target()
#target pose dv for all target views (=T_camL_w)
reprojectionErrors = [];
sm.logDebug("calibrateIntrinsics: adding camera error terms for {0} calibration targets".format(len(obslist)))
target_pose_dvs=list()
for obs in obslist:
success, T_t_c = cam_geometry.geometry.estimateTransformation(obs)
target_pose_dv = addPoseDesignVariable(problem, T_t_c)
target_pose_dvs.append(target_pose_dv)
T_cam_w = target_pose_dv.toExpression().inverse()
## add error terms
for i in range(0, target.size()):
p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));
valid, y = obs.imagePoint(i)
if valid:
rerr = cam_geometry.model.reprojectionError(y, invR, T_cam_w * p_target, cam_geometry.dv)
problem.addErrorTerm(rerr)
reprojectionErrors.append(rerr)
sm.logDebug("calibrateIntrinsics: added {0} camera error terms".format(len(reprojectionErrors)))
############################################
## solve
############################################
options = aopt.Optimizer2Options()
options.verbose = True if sm.getLoggingLevel()==sm.LoggingLevel.Debug else False
options.nThreads = 4
options.convergenceDeltaX = 1e-3
options.convergenceDeltaJ = 1
options.maxIterations = 200
options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)
optimizer = aopt.Optimizer2(options)
optimizer.setProblem(problem)
#verbose output
if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
sm.logDebug("Before optimization:")
e2 = np.array([ e.evaluateError() for e in reprojectionErrors ])
sm.logDebug( " Reprojection error squarred (camL): mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
#run intrinsic calibration
try:
retval = optimizer.optimize()
if retval.linearSolverFailure:
sm.logError("calibrateIntrinsics: Optimization failed!")
success = not retval.linearSolverFailure
except:
sm.logError("calibrateIntrinsics: Optimization failed!")
success = False
#verbose output
if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
d = cam_geometry.geometry.projection().distortion().getParameters().flatten()
p = cam_geometry.geometry.projection().getParameters().flatten()
sm.logDebug("calibrateIntrinsics: guess for intrinsics cam: {0}".format(p))
sm.logDebug("calibrateIntrinsics: guess for distortion cam: {0}".format(d))
return success
def solveFullBatch(cameras, baseline_guesses, graph):
    """Jointly calibrate a camera chain: optimizes every camera's intrinsics,
    the inter-camera baseline transformations and one target pose per view,
    over all observations in the observation database.

    @param cameras: list of camera objects (each exposing dv, model, ctarget
        and the projectionActive/distortionActive flags)
    @param baseline_guesses: initial guesses for the len(cameras)-1 baselines
    @param graph: observation graph providing obs_db and target pose guesses
    @return: (success, baselines) -- success flag of the optimization and the
        refined baselines as a list of sm.Transformation
    """
    ############################################
    ## build the bundle adjustment problem
    ############################################
    problem = aopt.OptimizationProblem()

    # add the intrinsics design variables of every camera in the chain
    for cam in cameras:
        cam.setDvActiveStatus(cam.projectionActive, cam.distortionActive, False)
        problem.addDesignVariable(cam.dv.distortionDesignVariable())
        problem.addDesignVariable(cam.dv.projectionDesignVariable())
        problem.addDesignVariable(cam.dv.shutterDesignVariable())

    # one transformation design variable per baseline (cam_i -> cam_i+1)
    baseline_dvs = list()
    for baseline_idx in range(0, len(cameras)-1):
        baseline_dv = aopt.TransformationDv(baseline_guesses[baseline_idx])

        for i in range(0, baseline_dv.numDesignVariables()):
            problem.addDesignVariable(baseline_dv.getDesignVariable(i))

        baseline_dvs.append( baseline_dv )

    # corner uncertainty (isotropic, 1 px std. dev.)
    cornerUncertainty = 1.0
    R = np.eye(2) * cornerUncertainty * cornerUncertainty
    invR = np.linalg.inv(R)

    # the calibration target (shared by all cameras in the chain)
    target = cameras[0].ctarget.detector.target()

    # Add calibration target reprojection error terms for all cameras in chain
    target_pose_dvs = list()
    reprojectionErrors = []
    timestamps = graph.obs_db.getAllViewTimestamps()
    for view_id, timestamp in enumerate(timestamps):

        # get all observations for all cams at this time
        obs_tuple = graph.obs_db.getAllObsAtTimestamp(timestamp)

        # create a target pose dv for all target views (= T_cam0_w)
        T0 = graph.getTargetPoseGuess(timestamp, cameras, baseline_guesses)
        target_pose_dv = addPoseDesignVariable(problem, T0)
        target_pose_dvs.append(target_pose_dv)

        for cidx, obs in obs_tuple:
            cam = cameras[cidx]

            # calibration target coords to camera X coords
            T_cam0_calib = target_pose_dv.toExpression().inverse()

            # build pose chain (target->cam0->baselines->camN)
            T_camN_calib = T_cam0_calib
            for idx in range(0, cidx):
                T_camN_calib = baseline_dvs[idx].toExpression() * T_camN_calib

            # one reprojection error term per detected target corner
            for i in range(0, target.size()):
                p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)))
                valid, y = obs.imagePoint(i)
                if valid:
                    rerr = cameras[cidx].model.reprojectionError(y, invR, T_camN_calib * p_target, cameras[cidx].dv)
                    problem.addErrorTerm(rerr)
                    reprojectionErrors.append(rerr)

    sm.logDebug("solveFullBatch: added {0} camera error terms".format(len(reprojectionErrors)))

    ############################################
    ## solve
    ############################################
    options = aopt.Optimizer2Options()
    options.verbose = True if sm.getLoggingLevel()==sm.LoggingLevel.Debug else False
    options.nThreads = 4
    options.convergenceDeltaX = 1e-3
    options.convergenceDeltaJ = 1
    options.maxIterations = 250
    options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)

    optimizer = aopt.Optimizer2(options)
    optimizer.setProblem(problem)

    # verbose output
    if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
        sm.logDebug("Before optimization:")
        e2 = np.array([ e.evaluateError() for e in reprojectionErrors ])
        sm.logDebug( " Reprojection error squarred (camL): mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )

    # run the batch optimization
    try:
        retval = optimizer.optimize()
        if retval.linearSolverFailure:
            # BUGFIX: message previously said "calibrateIntrinsics" (copy-paste)
            sm.logError("solveFullBatch: Optimization failed!")
        success = not retval.linearSolverFailure
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a solver failure.
        sm.logError("solveFullBatch: Optimization failed!")
        success = False

    # extract the refined baseline estimates
    baselines = list()
    for baseline_dv in baseline_dvs:
        baselines.append( sm.Transformation(baseline_dv.T()) )

    return success, baselines
| 44.542857 | 141 | 0.623068 | import sm
import aslam_backend as aopt
import aslam_cv as cv
import numpy as np
def addPoseDesignVariable(problem, T0=None):
    """Add an active 6-DoF pose (rotation quaternion + translation) to the
    optimization problem.

    @param problem: aopt.OptimizationProblem the design variables are added to
    @param T0: optional sm.Transformation used as the initial guess; a fresh
        identity transformation is created when omitted.  (The previous
        default ``T0=sm.Transformation()`` was evaluated once at import time,
        so every no-argument caller shared a single instance -- a classic
        mutable-default pitfall.)
    @return: aopt.TransformationBasicDv combining both design variables
    """
    if T0 is None:
        T0 = sm.Transformation()

    # rotation part (active => optimized)
    q_Dv = aopt.RotationQuaternionDv( T0.q() )
    q_Dv.setActive( True )
    problem.addDesignVariable(q_Dv)

    # translation part (active => optimized)
    t_Dv = aopt.EuclideanPointDv( T0.t() )
    t_Dv.setActive( True )
    problem.addDesignVariable(t_Dv)

    return aopt.TransformationBasicDv( q_Dv.toExpression(), t_Dv.toExpression() )
def stereoCalibrate(camL_geometry, camH_geometry, obslist, distortionActive=False, baseline=None):
| true | true |
f71ac13fb53339858ca85e2c4cb872952d99c45f | 41,090 | py | Python | clusterdock_centos_7-master/clusterdock/topologies/cdh/cm_api/endpoints/types.py | J0nathan12/clusterdock_centos_7 | e8dabbab82111a4a61ff27b6d7f529552c682c3e | [
"Apache-2.0"
] | 77 | 2016-08-02T17:16:59.000Z | 2021-12-30T02:21:09.000Z | clusterdock/topologies/cdh/cm_api/endpoints/types.py | cloudera/clusterdock | 222ac2d5a9aa2fef7b84b3cf704f9ee54c0cc5d2 | [
"Apache-2.0"
] | 41 | 2016-08-01T20:31:50.000Z | 2022-03-24T00:52:32.000Z | clusterdock/topologies/cdh/cm_api/endpoints/types.py | J0nathan12/clusterdock_jdock | e32978e51738d652d7bc73d5f4b71d2766e008ac | [
"Apache-2.0"
] | 77 | 2016-08-03T03:55:58.000Z | 2022-01-05T07:13:51.000Z | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import json
except ImportError:
import simplejson as json
import copy
import datetime
import time
__docformat__ = "epytext"
class Attr(object):
  """
  Encapsulates information about an attribute in the JSON encoding of the
  object. It identifies properties of the attribute such as whether it's
  read-only, its type, etc.
  """
  # Wire format for timestamps used by the CM API (UTC, millisecond precision).
  DATE_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"

  def __init__(self, atype=None, rw=True, is_api_list=False):
    # atype: expected python type of the attribute's value (None = raw).
    # rw: whether API clients may write the attribute.
    # is_api_list: whether the value is serialized as an ApiList wrapper.
    self._atype = atype
    self._is_api_list = is_api_list
    self.rw = rw

  def to_json(self, value, preserve_ro):
    """
    Returns the JSON encoding of the given attribute value.

    If the value has a 'to_json_dict' object, that method is called. Otherwise,
    the following values are returned for each input type:
     - datetime.datetime: string with the API representation of a date.
     - dictionary: if 'atype' is ApiConfig, a list of ApiConfig objects.
     - python list: python list (or ApiList) with JSON encoding of items
     - the raw value otherwise

    NOTE: the isinstance checks below are order-sensitive; e.g. the
    to_json_dict duck-type check must run before the list/tuple check.
    """
    if hasattr(value, 'to_json_dict'):
      return value.to_json_dict(preserve_ro)
    elif isinstance(value, dict) and self._atype == ApiConfig:
      return config_to_api_list(value)
    elif isinstance(value, datetime.datetime):
      return value.strftime(self.DATE_FMT)
    elif isinstance(value, list) or isinstance(value, tuple):
      if self._is_api_list:
        return ApiList(value).to_json_dict()
      else:
        return [ self.to_json(x, preserve_ro) for x in value ]
    else:
      return value

  def from_json(self, resource_root, data):
    """
    Parses the given JSON value into an appropriate python object.

    This means:
     - a datetime.datetime if 'atype' is datetime.datetime
     - a converted config dictionary or config list if 'atype' is ApiConfig
     - if the attr is an API list, an ApiList with instances of 'atype'
     - an instance of 'atype' if it has a 'from_json_dict' method
     - a python list with decoded versions of the member objects if the input
       is a python list.
     - the raw value otherwise
    """
    if data is None:
      return None
    if self._atype == datetime.datetime:
      return datetime.datetime.strptime(data, self.DATE_FMT)
    elif self._atype == ApiConfig:
      # ApiConfig is special. We want a python dictionary for summary views,
      # but an ApiList for full views. Try to detect each case from the JSON
      # data.
      if not data['items']:
        return { }
      first = data['items'][0]
      # Summary-view config entries have exactly 2 keys (name, value).
      return json_to_config(data, len(first) == 2)
    elif self._is_api_list:
      return ApiList.from_json_dict(data, resource_root, self._atype)
    elif isinstance(data, list):
      return [ self.from_json(resource_root, x) for x in data ]
    elif hasattr(self._atype, 'from_json_dict'):
      return self._atype.from_json_dict(data, resource_root)
    else:
      return data
class ROAttr(Attr):
  """
  Convenience subclass for read-only attributes: behaves exactly like Attr
  with rw pinned to False.
  """

  def __init__(self, atype=None, is_api_list=False):
    # Delegate to the base class, forcing the read-only flag.
    super(ROAttr, self).__init__(atype=atype, rw=False, is_api_list=is_api_list)
def check_api_version(resource_root, min_version):
  """
  Checks that the resource_root's API version is at least the given minimum
  version; raises an Exception otherwise.
  """
  actual = resource_root.version
  if actual >= min_version:
    return
  raise Exception("API version %s is required but %s is in use."
      % (min_version, actual))
def call(method, path, ret_type,
    ret_is_list=False, data=None, params=None, api_version=1):
  """
  Generic function for calling a resource method and automatically dealing with
  serialization of parameters and deserialization of return values.

  @param method: method to call (must be bound to a resource;
                 e.g., "resource_root.get").
  @param path: the full path of the API method to call.
  @param ret_type: return type of the call.
  @param ret_is_list: whether the return type is an ApiList.
  @param data: Optional data to send as payload to the call.
  @param params: Optional query parameters for the call.
  @param api_version: minimum API version for the call.
  """
  # method.im_self is the Python 2 bound-method owner, i.e. the resource root
  # the method was bound to; it carries the API version and serves as the
  # resource_root for deserialization below.
  check_api_version(method.im_self, api_version)
  if data is not None:
    # Wrap the payload as an API list so both single objects and lists
    # serialize uniformly, then JSON-encode it.
    data = json.dumps(Attr(is_api_list=True).to_json(data, False))
    ret = method(path, data=data, params=params)
  else:
    ret = method(path, params=params)
  if ret_type is None:
    return
  elif ret_is_list:
    return ApiList.from_json_dict(ret, method.im_self, ret_type)
  elif isinstance(ret, list):
    return [ ret_type.from_json_dict(x, method.im_self) for x in ret ]
  else:
    return ret_type.from_json_dict(ret, method.im_self)
class BaseApiObject(object):
  """
  The BaseApiObject helps with (de)serialization from/to JSON.

  The derived class has two ways of defining custom attributes:
   - Overwriting the '_ATTRIBUTES' field with the attribute dictionary
   - Override the _get_attributes() method, in case static initialization of
     the above field is not possible.

  It's recommended that the _get_attributes() implementation do caching to
  avoid computing the dictionary on every invocation.

  The derived class's constructor must call the base class's init() static
  method. All constructor arguments (aside from self and resource_root) must
  be keywords arguments with default values (typically None), or
  from_json_dict() will not work.
  """
  # Maps attribute name -> Attr instance (or None for default behavior).
  _ATTRIBUTES = { }
  # Internal attribute names exempt from the __setattr__ validity check.
  _WHITELIST = ( '_resource_root', '_attributes' )

  @classmethod
  def _get_attributes(cls):
    """
    Returns a map of property names to attr instances (or None for default
    attribute behavior) describing the properties of the object.

    By default, this method will return the class's _ATTRIBUTES field.
    Classes can override this method to do custom initialization of the
    attributes when needed.
    """
    return cls._ATTRIBUTES

  @staticmethod
  def init(obj, resource_root, attrs=None):
    """
    Wraper around the real constructor to avoid issues with the 'self'
    argument. Call like this, from a subclass's constructor:

     - BaseApiObject.init(self, locals())

    Subclasses typically pass locals() here, so 'self' and 'resource_root'
    are filtered out below before being forwarded as keyword arguments.
    """
    # This works around http://bugs.python.org/issue2646
    # We use unicode strings as keys in kwargs.
    str_attrs = { }
    if attrs:
      for k, v in attrs.iteritems():
        if k not in ('self', 'resource_root'):
          str_attrs[k] = v
    BaseApiObject.__init__(obj, resource_root, **str_attrs)

  def __init__(self, resource_root, **attrs):
    """
    Initializes internal state and sets all known writable properties of the
    object to None. Then initializes the properties given in the provided
    attributes dictionary.

    @param resource_root: API resource object.
    @param attrs: optional dictionary of attributes to set. This should only
                  contain r/w attributes.
    """
    self._resource_root = resource_root

    # Pre-seed every declared attribute with None, bypassing __setattr__
    # (which would reject read-only attributes).
    for name, attr in self._get_attributes().iteritems():
      object.__setattr__(self, name, None)
    if attrs:
      self._set_attrs(attrs, from_json=False)

  def _set_attrs(self, attrs, allow_ro=False, from_json=True):
    """
    Sets all the attributes in the dictionary. Optionally, allows setting
    read-only attributes (e.g. when deserializing from JSON) and skipping
    JSON deserialization of values.
    """
    for k, v in attrs.iteritems():
      attr = self._check_attr(k, allow_ro)
      if attr and from_json:
        v = attr.from_json(self._get_resource_root(), v)
      # Bypass __setattr__ so read-only attrs can be populated here.
      object.__setattr__(self, k, v)

  def __setattr__(self, name, val):
    # Validate writes to anything but the whitelisted internal fields.
    if name not in BaseApiObject._WHITELIST:
      self._check_attr(name, False)
    object.__setattr__(self, name, val)

  def _check_attr(self, name, allow_ro):
    # Raises AttributeError for unknown attributes, and for read-only
    # attributes unless allow_ro is set. Returns the Attr (may be None).
    if name not in self._get_attributes():
      raise AttributeError('Invalid property %s for class %s.' %
          (name, self.__class__.__name__))
    attr = self._get_attributes()[name]
    if not allow_ro and attr and not attr.rw:
      raise AttributeError('Attribute %s of class %s is read only.' %
          (name, self.__class__.__name__))
    return attr

  def _get_resource_root(self):
    return self._resource_root

  def _update(self, api_obj):
    """Copy state from api_obj to this object."""
    if not isinstance(self, api_obj.__class__):
      raise ValueError(
          "Class %s does not derive from %s; cannot update attributes." %
          (self.__class__, api_obj.__class__))

    for name in self._get_attributes().keys():
      try:
        val = getattr(api_obj, name)
        setattr(self, name, val)
      except AttributeError, ignored:
        # Attribute missing on the source object; leave ours untouched.
        pass

  def to_json_dict(self, preserve_ro=False):
    # Serializes this object into a plain dict; read-only attributes are
    # skipped unless preserve_ro is set.
    dic = { }
    for name, attr in self._get_attributes().iteritems():
      if not preserve_ro and attr and not attr.rw:
        continue
      try:
        value = getattr(self, name)
        if value is not None:
          if attr:
            dic[name] = attr.to_json(value, preserve_ro)
          else:
            dic[name] = value
      except AttributeError:
        pass
    return dic

  def __str__(self):
    """
    Default implementation of __str__. Uses the type name and the first
    attribute retrieved from the attribute map to create the string.
    """
    name = self._get_attributes().keys()[0]
    value = getattr(self, name, None)
    return "<%s>: %s = %s" % (self.__class__.__name__, name, value)

  @classmethod
  def from_json_dict(cls, dic, resource_root):
    # Construct with defaults, then populate (including read-only attrs).
    obj = cls(resource_root)
    obj._set_attrs(dic, allow_ro=True)
    return obj
class BaseApiResource(BaseApiObject):
  """
  A specialization of BaseApiObject that provides some utility methods for
  resources. This class allows easier serialization / deserialization of
  parameters and return values.
  """

  def _api_version(self):
    """
    Returns the minimum API version for this resource. Defaults to 1.
    """
    return 1

  def _path(self):
    """
    Returns the path to the resource.

    e.g., for a service 'foo' in cluster 'bar', this should return
    '/clusters/bar/services/foo'.
    """
    raise NotImplementedError

  def _require_min_api_version(self, version):
    """
    Raise an exception if the version of the api is less than the given version.

    @param version: The minimum required version.
    """
    actual_version = self._get_resource_root().version
    # The effective requirement is the larger of the caller's version and
    # this resource's own minimum.
    version = max(version, self._api_version())
    if actual_version < version:
      raise Exception("API version %s is required but %s is in use."
          % (version, actual_version))

  def _cmd(self, command, data=None, params=None, api_version=1):
    """
    Invokes a command on the resource. Commands are expected to be under the
    "commands/" sub-resource.
    """
    return self._post("commands/" + command, ApiCommand,
        data=data, params=params, api_version=api_version)

  def _get_config(self, rel_path, view, api_version=1):
    """
    Retrieves an ApiConfig list from the given relative path.
    """
    self._require_min_api_version(api_version)
    # and/or idiom: dict(view=view) is always truthy, so this yields the
    # params dict when a view is given and None otherwise.
    params = view and dict(view=view) or None
    resp = self._get_resource_root().get(self._path() + '/' + rel_path,
        params=params)
    return json_to_config(resp, view == 'full')

  def _update_config(self, rel_path, config, api_version=1):
    # PUTs the given config dict and returns the server's summary view.
    self._require_min_api_version(api_version)
    resp = self._get_resource_root().put(self._path() + '/' + rel_path,
        data=config_to_json(config))
    return json_to_config(resp, False)

  # The four HTTP-verb helpers below all delegate to _call with the verb name.
  def _delete(self, rel_path, ret_type, ret_is_list=False, params=None,
      api_version=1):
    return self._call('delete', rel_path, ret_type, ret_is_list, None, params,
        api_version)

  def _get(self, rel_path, ret_type, ret_is_list=False, params=None,
      api_version=1):
    return self._call('get', rel_path, ret_type, ret_is_list, None, params,
        api_version)

  def _post(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
      api_version=1):
    return self._call('post', rel_path, ret_type, ret_is_list, data, params,
        api_version)

  def _put(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
      api_version=1):
    return self._call('put', rel_path, ret_type, ret_is_list, data, params,
        api_version)

  def _call(self, method, rel_path, ret_type, ret_is_list=False, data=None,
      params=None, api_version=1):
    # Resolves the verb to a bound method on the resource root and invokes
    # the module-level call() helper with the resource's full path.
    path = self._path()
    if rel_path:
      path += '/' + rel_path
    return call(getattr(self._get_resource_root(), method),
        path,
        ret_type,
        ret_is_list,
        data,
        params,
        api_version)
class ApiList(BaseApiObject):
  """A list of some api object, wrapping a plain python list and mirroring
  the API's {"items": [...]} JSON envelope."""
  LIST_KEY = "items"

  def __init__(self, objects, resource_root=None, **attrs):
    BaseApiObject.__init__(self, resource_root, **attrs)
    # Bypass checks in BaseApiObject.__setattr__
    object.__setattr__(self, 'objects', objects)

  def __str__(self):
    return "<ApiList>(%d): [%s]" % (
        len(self.objects),
        ", ".join([str(item) for item in self.objects]))

  def to_json_dict(self, preserve_ro=False):
    # Serialize any declared extra attributes first, then the members under
    # the "items" key.
    ret = BaseApiObject.to_json_dict(self, preserve_ro)
    attr = Attr()
    ret[ApiList.LIST_KEY] = [ attr.to_json(x, preserve_ro) for x in self.objects ]
    return ret

  def __len__(self):
    return self.objects.__len__()

  def __iter__(self):
    return self.objects.__iter__()

  def __getitem__(self, i):
    return self.objects.__getitem__(i)

  def __getslice__(self, i, j):
    # BUGFIX: this was defined as "__getslice" (missing the trailing
    # underscores), which Python name-mangles to _ApiList__getslice and the
    # Python 2 slicing protocol never invokes. With the correct name,
    # alist[i:j] now delegates to the underlying list.
    return self.objects.__getslice__(i, j)

  @classmethod
  def from_json_dict(cls, dic, resource_root, member_cls=None):
    """Deserialize an {"items": [...]} dict into an ApiList of member_cls
    instances (defaults to the class's _MEMBER_CLASS)."""
    if not member_cls:
      member_cls = cls._MEMBER_CLASS
    attr = Attr(atype=member_cls)
    items = []
    if ApiList.LIST_KEY in dic:
      items = [ attr.from_json(resource_root, x) for x in dic[ApiList.LIST_KEY] ]
    ret = cls(items)
    # If the class declares custom attributes, populate them based on the input
    # dict. The check avoids extra overhead for the common case, where we just
    # have a plain list. _set_attrs() also does not understand the "items"
    # attribute, so it can't be in the input data.
    if cls._ATTRIBUTES:
      if ApiList.LIST_KEY in dic:
        dic = copy.copy(dic)
        del dic[ApiList.LIST_KEY]
      ret._set_attrs(dic, allow_ro=True)
    return ret
class ApiHostRef(BaseApiObject):
  """Reference to a host by its hostId."""
  _ATTRIBUTES = {
    'hostId' : None,
  }

  def __init__(self, resource_root, hostId=None):
    # locals() forwards the keyword args above as the attribute dict.
    BaseApiObject.init(self, resource_root, locals())

  def __str__(self):
    return "<ApiHostRef>: %s" % (self.hostId)

class ApiServiceRef(BaseApiObject):
  """Reference to a service, optionally qualified by cluster and peer."""
  _ATTRIBUTES = {
    'clusterName' : None,
    'serviceName' : None,
    'peerName' : None,
  }

  def __init__(self, resource_root, serviceName=None, clusterName=None,
      peerName=None):
    BaseApiObject.init(self, resource_root, locals())

class ApiClusterRef(BaseApiObject):
  """Reference to a cluster by name."""
  _ATTRIBUTES = {
    'clusterName' : None,
  }

  def __init__(self, resource_root, clusterName = None):
    BaseApiObject.init(self, resource_root, locals())

class ApiRoleRef(BaseApiObject):
  """Reference to a role within a service within a cluster."""
  _ATTRIBUTES = {
    'clusterName' : None,
    'serviceName' : None,
    'roleName' : None,
  }

  def __init__(self, resource_root, serviceName=None, roleName=None,
      clusterName=None):
    BaseApiObject.init(self, resource_root, locals())

class ApiRoleConfigGroupRef(BaseApiObject):
  """Reference to a role config group by name."""
  _ATTRIBUTES = {
    'roleConfigGroupName' : None,
  }

  def __init__(self, resource_root, roleConfigGroupName=None):
    BaseApiObject.init(self, resource_root, locals())
class ApiCommand(BaseApiObject):
  """An asynchronous (or synchronous) command executed by Cloudera Manager.
  Provides helpers to poll, wait for, abort and retry the command."""
  # Sentinel id for commands that completed synchronously; such commands
  # cannot be fetched/waited on/aborted.
  SYNCHRONOUS_COMMAND_ID = -1

  @classmethod
  def _get_attributes(cls):
    # Lazily build and cache the attribute map on first use (it references
    # ApiCommand itself, so it can't be a static class field).
    if not cls.__dict__.has_key('_ATTRIBUTES'):
      cls._ATTRIBUTES = {
        'id' : ROAttr(),
        'name' : ROAttr(),
        'startTime' : ROAttr(datetime.datetime),
        'endTime' : ROAttr(datetime.datetime),
        'active' : ROAttr(),
        'success' : ROAttr(),
        'resultMessage' : ROAttr(),
        'clusterRef' : ROAttr(ApiClusterRef),
        'serviceRef' : ROAttr(ApiServiceRef),
        'roleRef' : ROAttr(ApiRoleRef),
        'hostRef' : ROAttr(ApiHostRef),
        'children' : ROAttr(ApiCommand, is_api_list=True),
        'parent' : ROAttr(ApiCommand),
        'resultDataUrl' : ROAttr(),
        'canRetry' : ROAttr(),
      }
    return cls._ATTRIBUTES

  def __str__(self):
    return "<ApiCommand>: '%s' (id: %s; active: %s; success: %s)" % (
        self.name, self.id, self.active, self.success)

  def _path(self):
    return '/commands/%d' % self.id

  def fetch(self):
    """
    Retrieve updated data about the command from the server.

    @return: A new ApiCommand object.
    """
    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
      return self

    resp = self._get_resource_root().get(self._path())
    return ApiCommand.from_json_dict(resp, self._get_resource_root())

  def wait(self, timeout=None):
    """
    Wait for command to finish.

    @param timeout: (Optional) Max amount of time (in seconds) to wait. Wait
                    forever by default.
    @return: The final ApiCommand object, containing the last known state.
             The command may still be running in case of timeout.
    """
    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
      return self

    SLEEP_SEC = 5

    if timeout is None:
      deadline = None
    else:
      deadline = time.time() + timeout

    # Poll the server until the command becomes inactive or the deadline
    # passes; sleep no longer than the remaining time before the deadline.
    while True:
      cmd = self.fetch()
      if not cmd.active:
        return cmd

      if deadline is not None:
        now = time.time()
        if deadline < now:
          return cmd
        else:
          time.sleep(min(SLEEP_SEC, deadline - now))
      else:
        time.sleep(SLEEP_SEC)

  def abort(self):
    """
    Abort a running command.

    @return: A new ApiCommand object with the updated information.
    """
    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
      return self

    path = self._path() + '/abort'
    resp = self._get_resource_root().post(path)
    return ApiCommand.from_json_dict(resp, self._get_resource_root())

  def retry(self):
    """
    Retry a failed or aborted command.
    Note: The retry will only work for ClusterUpgrade command for now.

    @return: A new ApiCommand object with the updated information.
    """
    path = self._path() + '/retry'
    resp = self._get_resource_root().post(path)
    return ApiCommand.from_json_dict(resp, self._get_resource_root())
class ApiBulkCommandList(ApiList):
  """List of commands returned by bulk operations; 'errors' collects
  per-item failures."""
  _ATTRIBUTES = {
    'errors' : ROAttr(),
  }
  _MEMBER_CLASS = ApiCommand

class ApiCommandMetadata(BaseApiObject):
  """Metadata describing an available command (name and argument schema)."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'argSchema' : ROAttr(),
  }

  def __init__(self, resource_root):
    BaseApiObject.init(self, resource_root)

  def __str__(self):
    return "<ApiCommandMetadata>: %s (%s)" % (self.name, self.argSchema)

#
# Metrics.
#

class ApiMetricData(BaseApiObject):
  """Metric reading data."""
  _ATTRIBUTES = {
    'timestamp' : ROAttr(datetime.datetime),
    'value' : ROAttr(),
  }

  def __init__(self, resource_root):
    BaseApiObject.init(self, resource_root)

class ApiMetric(BaseApiObject):
  """Metric information."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'context' : ROAttr(),
    'unit' : ROAttr(),
    'data' : ROAttr(ApiMetricData),
    'displayName' : ROAttr(),
    'description' : ROAttr(),
  }

  def __init__(self, resource_root):
    BaseApiObject.init(self, resource_root)

#
# Activities.
#

class ApiActivity(BaseApiObject):
  """An activity (e.g. a MapReduce job) as reported by the API; all fields
  are read-only."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'type' : ROAttr(),
    'parent' : ROAttr(),
    'startTime' : ROAttr(),
    'finishTime' : ROAttr(),
    'id' : ROAttr(),
    'status' : ROAttr(),
    'user' : ROAttr(),
    'group' : ROAttr(),
    'inputDir' : ROAttr(),
    'outputDir' : ROAttr(),
    'mapper' : ROAttr(),
    'combiner' : ROAttr(),
    'reducer' : ROAttr(),
    'queueName' : ROAttr(),
    'schedulerPriority' : ROAttr(),
  }

  def __init__(self, resource_root):
    BaseApiObject.init(self, resource_root)

  def __str__(self):
    return "<ApiActivity>: %s (%s)" % (self.name, self.status)
#
# Replication
#

class ApiCmPeer(BaseApiObject):
  """A Cloudera Manager peer used for cross-CM replication."""
  _ATTRIBUTES = {
      'name'      : None,
      'url'       : None,
      'username'  : None,
      'password'  : None,
      'type'      : None,
      'clouderaManagerCreatedUser'  : None,
    }

  def __str__(self):
    # NOTE(review): prints "<ApiPeer>" (historical name), not "<ApiCmPeer>".
    return "<ApiPeer>: %s (%s)" % (self.name, self.url)

class ApiLicensedFeatureUsage(BaseApiObject):
  """Usage of licensed features, totals and per-cluster breakdown."""
  _ATTRIBUTES = {
    'totals' : ROAttr(),
    'clusters' : ROAttr(),
  }

class ApiHdfsReplicationArguments(BaseApiObject):
  """Arguments for an HDFS replication (distcp-style) job."""
  _ATTRIBUTES = {
    'sourceService' : Attr(ApiServiceRef),
    'sourcePath' : None,
    'destinationPath' : None,
    'mapreduceServiceName' : None,
    'userName' : None,
    'numMaps' : None,
    'dryRun' : None,
    'bandwidthPerMap' : None,
    'logPath' : None,
    'schedulerPoolName' : None,
    'abortOnError' : None,
    'preservePermissions' : None,
    'preserveBlockSize' : None,
    'preserveReplicationCount' : None,
    'removeMissingFiles' : None,
    'skipChecksumChecks' : None,
    'skipTrash' : None,
    'replicationStrategy' : None,
    'preserveXAttrs' : None,
    'exclusionFilters' : None,
  }

class ApiHdfsReplicationResult(BaseApiObject):
  """Read-only result counters of an HDFS replication run."""
  _ATTRIBUTES = {
    'progress' : ROAttr(),
    'counters' : ROAttr(),
    'numBytesDryRun' : ROAttr(),
    'numFilesDryRun' : ROAttr(),
    'numFilesExpected' : ROAttr(),
    'numBytesExpected' : ROAttr(),
    'numFilesCopied' : ROAttr(),
    'numBytesCopied' : ROAttr(),
    'numFilesSkipped' : ROAttr(),
    'numBytesSkipped' : ROAttr(),
    'numFilesDeleted' : ROAttr(),
    'numFilesCopyFailed' : ROAttr(),
    'numBytesCopyFailed' : ROAttr(),
    'setupError' : ROAttr(),
    'jobId' : ROAttr(),
    'jobDetailsUri' : ROAttr(),
    'dryRun' : ROAttr(),
    'snapshottedDirs' : ROAttr(),
    'failedFiles' : ROAttr(),
    'runAsUser' : ROAttr(),
  }

class ApiHiveTable(BaseApiObject):
  """Identifies a Hive table by database and table name."""
  _ATTRIBUTES = {
    'database' : None,
    'tableName' : None,
  }

  def __str__(self):
    return "<ApiHiveTable>: %s, %s" % (self.database, self.tableName)

class ApiImpalaUDF(BaseApiObject):
  """Identifies an Impala UDF by database and signature."""
  _ATTRIBUTES = {
    'database' : ROAttr(),
    'signature' : ROAttr(),
  }

  def __str__(self):
    return "<ApiImpalaUDF>: %s, %s" % (self.database, self.signature)

class ApiHiveReplicationArguments(BaseApiObject):
  """Arguments for a Hive replication job (metadata, and optionally data
  via the nested HDFS arguments)."""
  _ATTRIBUTES = {
    'sourceService' : Attr(ApiServiceRef),
    'tableFilters' : Attr(ApiHiveTable),
    'exportDir' : None,
    'force' : None,
    'replicateData' : None,
    'hdfsArguments' : Attr(ApiHdfsReplicationArguments),
    'dryRun' : None,
    'replicateImpalaMetadata' : None,
  }

class ApiHiveReplicationResult(BaseApiObject):
  """Read-only result of a Hive replication run."""
  _ATTRIBUTES = {
    'tableCount' : ROAttr(),
    'tables' : ROAttr(ApiHiveTable),
    'impalaUDFCount' : ROAttr(),
    'impalaUDFs' : ROAttr(ApiImpalaUDF),
    'errorCount' : ROAttr(),
    'errors' : ROAttr(),
    'dataReplicationResult' : ROAttr(ApiHdfsReplicationResult),
    'dryRun' : ROAttr(),
    'runAsUser' : ROAttr(),
    'phase' : ROAttr(),
  }

class ApiReplicationCommand(ApiCommand):
  """An ApiCommand extended with HDFS/Hive replication results."""
  @classmethod
  def _get_attributes(cls):
    # Lazily merge the replication-specific attrs with ApiCommand's.
    if not cls.__dict__.has_key('_ATTRIBUTES'):
      attrs = {
        'hdfsResult' : ROAttr(ApiHdfsReplicationResult),
        'hiveResult' : ROAttr(ApiHiveReplicationResult),
      }
      attrs.update(ApiCommand._get_attributes())
      cls._ATTRIBUTES = attrs
    return cls._ATTRIBUTES

class ApiReplicationSchedule(BaseApiObject):
  """A recurring replication schedule; 'history' holds past command runs."""
  _ATTRIBUTES = {
    'startTime' : Attr(datetime.datetime),
    'endTime' : Attr(datetime.datetime),
    'interval' : None,
    'intervalUnit' : None,
    'paused' : None,
    'hdfsArguments' : Attr(ApiHdfsReplicationArguments),
    'hiveArguments' : Attr(ApiHiveReplicationArguments),
    'alertOnStart' : None,
    'alertOnSuccess' : None,
    'alertOnFail' : None,
    'alertOnAbort' : None,
    'id' : ROAttr(),
    'nextRun' : ROAttr(datetime.datetime),
    'history' : ROAttr(ApiReplicationCommand),
    'active' : None
  }
class ApiHBaseSnapshotPolicyArguments(BaseApiObject):
  """HBase-specific arguments for a snapshot policy."""
  _ATTRIBUTES = {
    'tableRegExps' : None,
    'storage' : None,
  }

class ApiHdfsSnapshotPolicyArguments(BaseApiObject):
  """HDFS-specific arguments for a snapshot policy."""
  _ATTRIBUTES = {
    'pathPatterns' : None,
  }

class ApiHBaseSnapshot(BaseApiObject):
  """A single HBase snapshot."""
  _ATTRIBUTES = {
    'snapshotName' : None,
    'tableName' : None,
    'creationTime' : ROAttr(datetime.datetime),
    'storage' : None,
  }

class ApiHBaseSnapshotError(BaseApiObject):
  """An error encountered while creating/deleting an HBase snapshot."""
  _ATTRIBUTES = {
    'tableName' : ROAttr(),
    'snapshotName' : ROAttr(),
    'error' : ROAttr(),
    'storage' : ROAttr(),
  }

class ApiHdfsSnapshot(BaseApiObject):
  """A single HDFS snapshot."""
  _ATTRIBUTES = {
    'path' : None,
    'snapshotName' : None,
    'snapshotPath' : None,
    'creationTime' : ROAttr(datetime.datetime),
  }

class ApiHdfsSnapshotError(BaseApiObject):
  """An error encountered while creating/deleting an HDFS snapshot."""
  _ATTRIBUTES = {
    'path' : ROAttr(),
    'snapshotName' : ROAttr(),
    'snapshotPath' : ROAttr(),
    'error' : ROAttr(),
  }

class ApiHBaseSnapshotResult(BaseApiObject):
  """Read-only result of an HBase snapshot policy run."""
  _ATTRIBUTES = {
    'processedTableCount' : ROAttr(),
    'processedTables' : ROAttr(),
    'unprocessedTableCount' : ROAttr(),
    'unprocessedTables' : ROAttr(),
    'createdSnapshotCount' : ROAttr(),
    'createdSnapshots' : ROAttr(ApiHBaseSnapshot),
    'deletedSnapshotCount' : ROAttr(),
    'deletedSnapshots' : ROAttr(ApiHBaseSnapshot),
    'creationErrorCount' : ROAttr(),
    'creationErrors' : ROAttr(ApiHBaseSnapshotError),
    'deletionErrorCount' : ROAttr(),
    'deletionErrors' : ROAttr(ApiHBaseSnapshotError),
  }

class ApiHdfsSnapshotResult(BaseApiObject):
  """Read-only result of an HDFS snapshot policy run."""
  _ATTRIBUTES = {
    'processedPathCount' : ROAttr(),
    'processedPaths' : ROAttr(),
    'unprocessedPathCount' : ROAttr(),
    'unprocessedPaths' : ROAttr(),
    'createdSnapshotCount' : ROAttr(),
    'createdSnapshots' : ROAttr(ApiHdfsSnapshot),
    'deletedSnapshotCount' : ROAttr(),
    'deletedSnapshots' : ROAttr(ApiHdfsSnapshot),
    'creationErrorCount' : ROAttr(),
    'creationErrors' : ROAttr(ApiHdfsSnapshotError),
    'deletionErrorCount' : ROAttr(),
    'deletionErrors' : ROAttr(ApiHdfsSnapshotError),
  }

class ApiSnapshotCommand(BaseApiObject):
  """Snapshot command result holder.

  NOTE(review): extends BaseApiObject but merges in ApiCommand's attribute
  map, so it carries command fields without inheriting ApiCommand's methods
  (fetch/wait/abort) -- confirm this asymmetry is intentional."""
  @classmethod
  def _get_attributes(cls):
    # Lazily merge the snapshot-specific attrs with ApiCommand's.
    if not cls.__dict__.has_key('_ATTRIBUTES'):
      attrs = {
        'hdfsResult' : ROAttr(ApiHdfsSnapshotResult),
        'hbaseResult' : ROAttr(ApiHBaseSnapshotResult),
      }
      attrs.update(ApiCommand._get_attributes())
      cls._ATTRIBUTES = attrs
    return cls._ATTRIBUTES
class ApiSnapshotPolicy(BaseApiObject):
  """
  @type name: str
  @ivar name: Name of the snapshot policy.
  @type description: str
  @ivar description: Description of the snapshot policy.
  @type hourly_snapshots: int
  @ivar hourly_snapshots: Number of hourly snapshots to be retained (default: 0).
  @type daily_snapshots: int
  @ivar daily_snapshots: Number of daily snapshots to be retained (default: 0).
  @type weekly_snapshots: int
  @ivar weekly_snapshots: Number of weekly snapshots to be retained (default: 0).
  @type monthly_snapshots: int
  @ivar monthly_snapshots: Number of monthly snapshots to be retained (default: 0).
  @type yearly_snapshots: int
  @ivar yearly_snapshots: Number of yearly snapshots to be retained (default: 0).
  @type hours_for_hourly_snapshots: list of int
  @ivar hours_for_hourly_snapshots: Hours of the day that hourly snapshots should be created.
         Valid values are 0 to 23. If this list is empty, then hourly snapshots are
         created for every hour.
  @type minute_of_hour: int
  @ivar minute_of_hour: Minute in the hour that hourly, daily, weekly, monthly and yearly
        snapshots should be created. Valid values are 0 to 59 (default: 0).
  @type hour_of_day: int
  @ivar hour_of_day: Hour in the day that daily, weekly, monthly and yearly snapshots should be created.
        Valid values are 0 to 23 (default: 0).
  @type day_of_week: int
  @ivar day_of_week: Day of the week that weekly snapshots should be created.
         Valid values are 1 to 7, 1 representing Sunday (default: 1).
  @type day_of_month: int
  @ivar day_of_month: Day of the month that monthly and yearly snapshots should be created.
         Values from 1 to 31 are allowed. Additionally 0 to -30 can be used to
         specify offsets from the last day of the month (default: 1).
  @type month_of_year: int
  @ivar month_of_year: Month of the year that yearly snapshots should be created.
         Valid values are 1 to 12, 1 representing January (default: 1).
  @ivar alert_on_start: whether to generate alerts on start of snapshot creation/deletion activity.
  @ivar alert_on_success: whether to generate alerts on successful completion of snapshot creation/deletion activity.
  @ivar alert_on_fail: whether to generate alerts on failure of snapshot creation/deletion activity.
  @ivar alert_on_abort: whether to generate alerts on abort of snapshot creation/deletion activity.
  @ivar paused: whether to run the policy on schedule
  @type hbaseArguments: ApiHBaseSnapshotPolicyArguments
  @ivar hbaseArguments: HBase specific arguments for the replication job.
  @type hdfsArguments: ApiHdfsSnapshotPolicyArguments
  @ivar hdfsArguments: HDFS specific arguments for the replication job.
  """
  # JSON attribute map; the two ROAttr entries are server-populated.
  _ATTRIBUTES = {
    'name' : None,
    'description' : None,
    'hourlySnapshots' : None,
    'dailySnapshots' : None,
    'weeklySnapshots' : None,
    'monthlySnapshots' : None,
    'yearlySnapshots' : None,
    'minuteOfHour' : None,
    'hourOfDay' : None,
    'dayOfWeek' : None,
    'dayOfMonth' : None,
    'monthOfYear' : None,
    'hoursForHourlySnapshots' : None,
    'alertOnStart' : None,
    'alertOnSuccess' : None,
    'alertOnFail' : None,
    'alertOnAbort' : None,
    'paused' : None,
    'hbaseArguments' : Attr(ApiHBaseSnapshotPolicyArguments),
    'hdfsArguments' : Attr(ApiHdfsSnapshotPolicyArguments),
    'lastCommand' : ROAttr(ApiSnapshotCommand),
    'lastSuccessfulCommand' : ROAttr(ApiSnapshotCommand),
  }
#
# Batch.
#
class ApiBatchRequestElement(BaseApiObject):
  """One element in a batch request."""
  _ATTRIBUTES = {
    'method' : None,        # HTTP method for this element (e.g. GET, POST).
    'url' : None,           # URL to invoke.
    'body' : None,          # Optional request body.
    'contentType' : None,   # MIME type of the request body.
    'acceptType' : None,    # MIME type expected in the response.
  }
class ApiBatchResponseElement(BaseApiObject):
  """One element in a batch response."""
  _ATTRIBUTES = {
    'statusCode' : ROAttr(),  # Status code of this element's response (read-only).
    'response' : ROAttr(),    # Response payload (read-only).
  }
class ApiBatchResponseList(ApiList):
  """A list of batch response objects."""
  _ATTRIBUTES = {
    'success' : ROAttr(),  # Overall success flag for the batch (read-only).
  }
  _MEMBER_CLASS = ApiBatchResponseElement
#
# Configuration helpers.
#
class ApiConfig(BaseApiObject):
  """A single configuration parameter: a writable name/value pair plus
  read-only metadata reported by the server."""
  _ATTRIBUTES = {
    # Writable attributes.
    'name': None,
    'value': None,
    # Read-only metadata.
    'required': ROAttr(),
    'default': ROAttr(),
    'displayName': ROAttr(),
    'description': ROAttr(),
    'relatedName': ROAttr(),
    'validationState': ROAttr(),
    'validationMessage': ROAttr(),
    'validationWarningsSuppressed': ROAttr(),
  }
  def __init__(self, resource_root, name=None, value=None):
    BaseApiObject.init(self, resource_root, locals())
  def __str__(self):
    rendered = "<ApiConfig>: %s = %s" % (self.name, self.value)
    return rendered
class ApiImpalaQuery(BaseApiObject):
  """Read-only description of a single Impala query."""
  _ATTRIBUTES = {
    'queryId' : ROAttr(),
    'queryState' : ROAttr(),
    'queryType' : ROAttr(),
    'statement' : ROAttr(),
    'database' : ROAttr(),
    'rowsProduced' : ROAttr(),
    'coordinator' : ROAttr(ApiHostRef),
    'user' : ROAttr(),
    'startTime' : ROAttr(datetime.datetime),
    'endTime' : ROAttr(datetime.datetime),
    'detailsAvailable' : ROAttr(),
    'attributes' : ROAttr(),
    'durationMillis' : ROAttr()
  }
  def __str__(self):
    return "<ApiImpalaQuery>: %s" % (self.queryId)
class ApiImpalaQueryResponse(BaseApiObject):
  """Response to an Impala query listing: the queries plus any warnings."""
  _ATTRIBUTES = {
    'queries' : ROAttr(ApiImpalaQuery),
    'warnings' : ROAttr()
  }
class ApiImpalaQueryDetailsResponse(BaseApiObject):
  """Carries the details string for a single Impala query."""
  _ATTRIBUTES = {
    'details' : ROAttr()
  }
  def __str__(self):
    # NOTE(review): "Aip" looks like a typo for "Api" in this repr string,
    # but it is runtime output, so it is preserved untouched here.
    return "<AipImpalaQueryDetailsResponse> %s" % self.details
class ApiImpalaCancelResponse(BaseApiObject):
  """Response to an Impala query cancellation request."""
  _ATTRIBUTES = {
    'warning' : ROAttr()
  }
  def __str__(self):
    return "<ApiImpalaCancelResponse> %s" % self.warning
class ApiImpalaQueryAttribute(BaseApiObject):
  """Read-only metadata describing one Impala query attribute."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'type' : ROAttr(),
    'displayName' : ROAttr(),
    'supportsHistograms' : ROAttr(),
    'description' : ROAttr()
  }
  def __str__(self):
    # Bug fix: the original referenced the bare global 'name', which is
    # undefined at module level and raised NameError at runtime; use the
    # instance attribute instead.
    return "<ApiImpalaQueryAttribute> %s" % self.name
class ApiMr2AppInformation(BaseApiObject):
  """Read-only MR2-specific information about a YARN application."""
  _ATTRIBUTES = {
    'jobState' : ROAttr()
  }
  def __str__(self):
    return "<ApiMr2AppInformation>: %s" % (self.jobState)
class ApiYarnApplication(BaseApiObject):
  """Read-only description of a single YARN application."""
  _ATTRIBUTES = {
    'applicationId' : ROAttr(),
    'name' : ROAttr(),
    'user' : ROAttr(),
    'startTime' : ROAttr(datetime.datetime),
    'endTime' : ROAttr(datetime.datetime),
    'pool' : ROAttr(),
    'state' : ROAttr(),
    'progress' : ROAttr(),
    'mr2AppInformation' : ROAttr(ApiMr2AppInformation),
    'attributes' : ROAttr(),
    'allocatedMB' : ROAttr(),
    'allocatedVCores' : ROAttr(),
    'runningContainers' : ROAttr(),
    'applicationTags' : ROAttr(),
    'allocatedMemorySeconds' : ROAttr(),
    'allocatedVcoreSeconds' : ROAttr(),
    'containerUsedMemorySeconds' : ROAttr(),
    'containerUsedCpuSeconds' : ROAttr(),
    'containerUsedVcoreSeconds' : ROAttr(),
    'containerAllocatedMemorySeconds' : ROAttr(),
    'containerAllocatedVcoreSeconds' : ROAttr(),
  }
  def __str__(self):
    return "<ApiYarnApplication>: %s" % (self.applicationId)
class ApiYarnApplicationResponse(BaseApiObject):
  """Response to a YARN application listing: applications plus warnings."""
  _ATTRIBUTES = {
    'applications' : ROAttr(ApiYarnApplication),
    'warnings' : ROAttr()
  }
class ApiYarnKillResponse(BaseApiObject):
  """Response to a YARN application kill request."""
  _ATTRIBUTES = {
    'warning' : ROAttr()
  }
  def __str__(self):
    return "<ApiYarnKillResponse> %s" % self.warning
class ApiYarnApplicationAttribute(BaseApiObject):
  """Read-only metadata describing one YARN application attribute."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'type' : ROAttr(),
    'displayName' : ROAttr(),
    'supportsHistograms' : ROAttr(),
    'description' : ROAttr()
  }
  def __str__(self):
    # Bug fix: the original referenced the bare global 'name', which is
    # undefined at module level and raised NameError at runtime; use the
    # instance attribute instead.
    return "<ApiYarnApplicationAttribute> %s" % self.name
class ApiTimeSeriesRequest(BaseApiObject):
  """Writable parameters for a time-series query."""
  _ATTRIBUTES = {
    'query' : None,
    'from' : None,
    'to' : None,
    'contentType' : None,
    'desiredRollup' : None,
    'mustUseDesiredRollup' : None
  }
  def __str__(self):
    return "<ApiTimeSeriesRequest>: %s" % (self.query)
class ApiProductVersion(BaseApiObject):
  """A (product, version) pair."""
  _ATTRIBUTES = {
    'version' : None,
    'product' : None,
  }
class ApiClusterTemplateConfig(BaseApiObject):
  """A single config entry inside a cluster template."""
  _ATTRIBUTES = {
    'name' : None,
    'value' : None,
    'ref' : None,
    'variable' : None,
    'autoConfig' : None,
  }
class ApiClusterTemplateRoleConfigGroup(BaseApiObject):
  """A role config group definition inside a cluster template."""
  _ATTRIBUTES = {
    'refName' : None,
    'roleType' : None,
    'base' : None,
    'displayName' : None,
    'configs' : Attr(ApiClusterTemplateConfig),
  }
class ApiClusterTemplateRole(BaseApiObject):
  """A role definition inside a cluster template."""
  _ATTRIBUTES = {
    'refName' : None,
    'roleType' : None,
  }
class ApiClusterTemplateHostTemplate(BaseApiObject):
  """A host template definition inside a cluster template."""
  _ATTRIBUTES = {
    'refName' : None,
    'cardinality' : None,
    'roleConfigGroupsRefNames' : None,
  }
class ApiClusterTemplateHostInfo(BaseApiObject):
  """Host mapping information used when instantiating a cluster template."""
  _ATTRIBUTES = {
    'hostName' : None,
    'hostNameRange' : None,
    'rackId' : None,
    'hostTemplateRefName' : None,
    'roleRefNames' : None,
  }
class ApiClusterTemplateVariable(BaseApiObject):
  """A name/value variable used when instantiating a cluster template."""
  _ATTRIBUTES = {
    'name' : None,
    'value' : None,
  }
class ApiClusterTemplateRoleConfigGroupInfo(BaseApiObject):
  """Maps a role config group reference to a concrete group name."""
  _ATTRIBUTES = {
    'rcgRefName' : None,
    'name' : None,
  }
class ApiClusterTemplateInstantiator(BaseApiObject):
  """Parameters used to instantiate a cluster from a template."""
  _ATTRIBUTES = {
    'clusterName' : None,
    'hosts' : Attr(ApiClusterTemplateHostInfo),
    'variables' : Attr(ApiClusterTemplateVariable),
    'roleConfigGroups' : Attr(ApiClusterTemplateRoleConfigGroupInfo),
  }
class ApiClusterTemplateService(BaseApiObject):
  """A service definition inside a cluster template."""
  _ATTRIBUTES = {
    'refName' : None,
    'serviceType' : None,
    'serviceConfigs' : Attr(ApiClusterTemplateConfig),
    'roleConfigGroups' : Attr(ApiClusterTemplateRoleConfigGroup),
    'displayName' : None,
    'roles' : Attr(ApiClusterTemplateRole),
  }
class ApiClusterTemplate(BaseApiObject):
  """Top-level cluster template object."""
  _ATTRIBUTES = {
    'cdhVersion' : None,
    'displayName' : None,
    'cmVersion' : None,
    "repositories" : None,
    'products' : Attr(ApiProductVersion),
    'services' : Attr(ApiClusterTemplateService),
    'hostTemplates' : Attr(ApiClusterTemplateHostTemplate),
    'instantiator' : Attr(ApiClusterTemplateInstantiator),
  }
def config_to_api_list(dic):
  """
  Converts a python dictionary into a list containing the proper
  ApiConfig encoding for configuration data.
  @param dic: Key-value pairs to convert.
  @return: JSON dictionary of an ApiConfig list (*not* an ApiList).
  """
  # Each (key, value) pair becomes one {'name': ..., 'value': ...} entry.
  entries = [{'name': key, 'value': val} for key, val in dic.items()]
  return {ApiList.LIST_KEY: entries}
def config_to_json(dic):
  """
  Converts a python dictionary into a JSON payload.
  The payload matches the expected "apiConfig list" type used to update
  configuration parameters using the API.
  @param dic: Key-value pairs to convert.
  @return: String with the JSON-encoded data.
  """
  payload = config_to_api_list(dic)
  return json.dumps(payload)
def json_to_config(dic, full = False):
  """
  Converts a JSON-decoded config dictionary to a python dictionary.
  When materializing the full view, the values in the dictionary will be
  instances of ApiConfig, instead of strings.
  @param dic: JSON-decoded config dictionary.
  @param full: Whether to materialize the full view of the config data.
  @return: Python dictionary with config data.
  """
  result = { }
  for item in dic['items']:
    entry_name = item['name']
    if full:
      # Full view: materialize each entry as an ApiConfig instance.
      result[entry_name] = ApiConfig.from_json_dict(item, None)
    else:
      # Summary view: plain value (None if the entry has no value).
      result[entry_name] = item.get('value')
  return result
| 31.951788 | 117 | 0.634485 |
try:
import json
except ImportError:
import simplejson as json
import copy
import datetime
import time
__docformat__ = "epytext"
class Attr(object):
  """
  Encapsulates information about an attribute in the JSON encoding of the
  object. It identifies properties of the attribute such as whether it's
  read-only, its type, etc.
  """
  # Timestamp format used by the API for datetime values.
  DATE_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"
  def __init__(self, atype=None, rw=True, is_api_list=False):
    # atype: expected python type of the attribute's value (or None).
    # rw: whether clients may write the attribute.
    # is_api_list: whether the attribute is serialized as an ApiList.
    self._atype = atype
    self._is_api_list = is_api_list
    self.rw = rw
  def to_json(self, value, preserve_ro):
    """
    Returns the JSON encoding of the given attribute value.
    If the value has a 'to_json_dict' object, that method is called. Otherwise,
    the following values are returned for each input type:
    - datetime.datetime: string with the API representation of a date.
    - dictionary: if 'atype' is ApiConfig, a list of ApiConfig objects.
    - python list: python list (or ApiList) with JSON encoding of items
    - the raw value otherwise
    """
    if hasattr(value, 'to_json_dict'):
      return value.to_json_dict(preserve_ro)
    elif isinstance(value, dict) and self._atype == ApiConfig:
      return config_to_api_list(value)
    elif isinstance(value, datetime.datetime):
      return value.strftime(self.DATE_FMT)
    elif isinstance(value, list) or isinstance(value, tuple):
      if self._is_api_list:
        return ApiList(value).to_json_dict()
      else:
        return [ self.to_json(x, preserve_ro) for x in value ]
    else:
      return value
  def from_json(self, resource_root, data):
    """
    Parses the given JSON value into an appropriate python object.
    This means:
    - a datetime.datetime if 'atype' is datetime.datetime
    - a converted config dictionary or config list if 'atype' is ApiConfig
    - if the attr is an API list, an ApiList with instances of 'atype'
    - an instance of 'atype' if it has a 'from_json_dict' method
    - a python list with decoded versions of the member objects if the input
      is a python list.
    - the raw value otherwise
    """
    if data is None:
      return None
    if self._atype == datetime.datetime:
      return datetime.datetime.strptime(data, self.DATE_FMT)
    elif self._atype == ApiConfig:
      # ApiConfig is special. We want a python dictionary for summary views,
      # but an ApiList for full views. Try to detect each case from the JSON
      # data.
      if not data['items']:
        return { }
      first = data['items'][0]
      # NOTE(review): presumably a summary-view entry has exactly the two
      # keys name/value while the full view has more — confirm against the
      # server payloads.
      return json_to_config(data, len(first) == 2)
    elif self._is_api_list:
      return ApiList.from_json_dict(data, resource_root, self._atype)
    elif isinstance(data, list):
      return [ self.from_json(resource_root, x) for x in data ]
    elif hasattr(self._atype, 'from_json_dict'):
      return self._atype.from_json_dict(data, resource_root)
    else:
      return data
class ROAttr(Attr):
  """
  Subclass that just defines the attribute as read-only.
  """
  def __init__(self, atype=None, is_api_list=False):
    # Delegate to Attr with rw forced off.
    super(ROAttr, self).__init__(atype=atype, rw=False, is_api_list=is_api_list)
def check_api_version(resource_root, min_version):
  """
  Checks that the resource_root's API version is at least the given minimum
  version, raising an Exception otherwise.
  """
  actual = resource_root.version
  if actual < min_version:
    message = "API version %s is required but %s is in use." % (
        min_version, actual)
    raise Exception(message)
def call(method, path, ret_type,
    ret_is_list=False, data=None, params=None, api_version=1):
  """
  Generic function for calling a resource method and automatically dealing with
  serialization of parameters and deserialization of return values.

  @param method: method to call (must be bound to a resource;
                 e.g., "resource_root.get").
  @param path: the full path of the API method to call.
  @param ret_type: return type of the call.
  @param ret_is_list: whether the return type is an ApiList.
  @param data: Optional data to send as payload to the call.
  @param params: Optional query parameters for the call.
  @param api_version: minimum API version for the call.
  """
  # im_self is the Python 2 bound-method attribute; here it is the
  # resource root the method is bound to.
  check_api_version(method.im_self, api_version)
  if data is not None:
    # Serialize the payload; is_api_list=True so python lists are sent as
    # ApiList JSON.
    data = json.dumps(Attr(is_api_list=True).to_json(data, False))
    ret = method(path, data=data, params=params)
  else:
    ret = method(path, params=params)
  if ret_type is None:
    return
  elif ret_is_list:
    return ApiList.from_json_dict(ret, method.im_self, ret_type)
  elif isinstance(ret, list):
    return [ ret_type.from_json_dict(x, method.im_self) for x in ret ]
  else:
    return ret_type.from_json_dict(ret, method.im_self)
class BaseApiObject(object):
  """
  The BaseApiObject helps with (de)serialization from/to JSON.
  The derived class has two ways of defining custom attributes:
   - Overwriting the '_ATTRIBUTES' field with the attribute dictionary
   - Override the _get_attributes() method, in case static initialization of
     the above field is not possible.
  It's recommended that the _get_attributes() implementation do caching to
  avoid computing the dictionary on every invocation.
  The derived class's constructor must call the base class's init() static
  method. All constructor arguments (aside from self and resource_root) must
  be keywords arguments with default values (typically None), or
  from_json_dict() will not work.
  """
  _ATTRIBUTES = { }
  # Attribute names that __setattr__ accepts without consulting _ATTRIBUTES.
  _WHITELIST = ( '_resource_root', '_attributes' )
  @classmethod
  def _get_attributes(cls):
    """
    Returns a map of property names to attr instances (or None for default
    attribute behavior) describing the properties of the object.
    By default, this method will return the class's _ATTRIBUTES field.
    Classes can override this method to do custom initialization of the
    attributes when needed.
    """
    return cls._ATTRIBUTES
  @staticmethod
  def init(obj, resource_root, attrs=None):
    """
    Wrapper around the real constructor to avoid issues with the 'self'
    argument. Call like this, from a subclass's constructor:
     - BaseApiObject.init(self, locals())
    """
    # This works around http://bugs.python.org/issue2646
    # We use unicode strings as keys in kwargs.
    str_attrs = { }
    if attrs:
      for k, v in attrs.iteritems():
        if k not in ('self', 'resource_root'):
          str_attrs[k] = v
    BaseApiObject.__init__(obj, resource_root, **str_attrs)
  def __init__(self, resource_root, **attrs):
    """
    Initializes internal state and sets all known writable properties of the
    object to None. Then initializes the properties given in the provided
    attributes dictionary.
    @param resource_root: API resource object.
    @param attrs: optional dictionary of attributes to set. This should only
                  contain r/w attributes.
    """
    self._resource_root = resource_root
    # Pre-declare every known attribute via object.__setattr__ so the
    # read-only checks in __setattr__ are bypassed here.
    for name, attr in self._get_attributes().iteritems():
      object.__setattr__(self, name, None)
    if attrs:
      self._set_attrs(attrs, from_json=False)
  def _set_attrs(self, attrs, allow_ro=False, from_json=True):
    """
    Sets all the attributes in the dictionary. Optionally, allows setting
    read-only attributes (e.g. when deserializing from JSON) and skipping
    JSON deserialization of values.
    """
    for k, v in attrs.iteritems():
      attr = self._check_attr(k, allow_ro)
      if attr and from_json:
        v = attr.from_json(self._get_resource_root(), v)
      object.__setattr__(self, k, v)
  def __setattr__(self, name, val):
    # Whitelisted internal names skip validation; everything else must be a
    # declared, writable attribute.
    if name not in BaseApiObject._WHITELIST:
      self._check_attr(name, False)
    object.__setattr__(self, name, val)
  def _check_attr(self, name, allow_ro):
    # Validate that 'name' is declared and (unless allow_ro) writable;
    # returns the Attr instance for the name (may be None).
    if name not in self._get_attributes():
      raise AttributeError('Invalid property %s for class %s.' %
          (name, self.__class__.__name__))
    attr = self._get_attributes()[name]
    if not allow_ro and attr and not attr.rw:
      raise AttributeError('Attribute %s of class %s is read only.' %
          (name, self.__class__.__name__))
    return attr
  def _get_resource_root(self):
    return self._resource_root
  def _update(self, api_obj):
    """Copy state from api_obj to this object."""
    if not isinstance(self, api_obj.__class__):
      raise ValueError(
          "Class %s does not derive from %s; cannot update attributes." %
          (self.__class__, api_obj.__class__))
    for name in self._get_attributes().keys():
      try:
        val = getattr(api_obj, name)
        setattr(self, name, val)
      except AttributeError, ignored:
        # Attribute missing on the source object; skip it.
        pass
  def to_json_dict(self, preserve_ro=False):
    # Serialize this object to a plain dict; read-only attributes are
    # skipped unless preserve_ro is set, and None values are omitted.
    dic = { }
    for name, attr in self._get_attributes().iteritems():
      if not preserve_ro and attr and not attr.rw:
        continue
      try:
        value = getattr(self, name)
        if value is not None:
          if attr:
            dic[name] = attr.to_json(value, preserve_ro)
          else:
            dic[name] = value
      except AttributeError:
        pass
    return dic
  def __str__(self):
    """
    Default implementation of __str__. Uses the type name and the first
    attribute retrieved from the attribute map to create the string.
    """
    name = self._get_attributes().keys()[0]
    value = getattr(self, name, None)
    return "<%s>: %s = %s" % (self.__class__.__name__, name, value)
  @classmethod
  def from_json_dict(cls, dic, resource_root):
    # Deserialization entry point: construct with defaults, then set all
    # attributes (including read-only ones) from the JSON dict.
    obj = cls(resource_root)
    obj._set_attrs(dic, allow_ro=True)
    return obj
class BaseApiResource(BaseApiObject):
  """
  A specialization of BaseApiObject that provides some utility methods for
  resources. This class allows easier serialization / deserialization of
  parameters and return values.
  """
  def _api_version(self):
    """
    Returns the minimum API version for this resource. Defaults to 1.
    """
    return 1
  def _path(self):
    """
    Returns the path to the resource.
    e.g., for a service 'foo' in cluster 'bar', this should return
    '/clusters/bar/services/foo'.
    """
    raise NotImplementedError
  def _require_min_api_version(self, version):
    """
    Raise an exception if the version of the api is less than the given version.
    @param version: The minimum required version.
    """
    actual_version = self._get_resource_root().version
    # The effective minimum is the larger of the caller's requirement and
    # this resource's own minimum.
    version = max(version, self._api_version())
    if actual_version < version:
      raise Exception("API version %s is required but %s is in use."
          % (version, actual_version))
  def _cmd(self, command, data=None, params=None, api_version=1):
    """
    Invokes a command on the resource. Commands are expected to be under the
    "commands/" sub-resource.
    """
    return self._post("commands/" + command, ApiCommand,
        data=data, params=params, api_version=api_version)
  def _get_config(self, rel_path, view, api_version=1):
    """
    Retrieves an ApiConfig list from the given relative path.
    """
    self._require_min_api_version(api_version)
    params = view and dict(view=view) or None
    resp = self._get_resource_root().get(self._path() + '/' + rel_path,
        params=params)
    return json_to_config(resp, view == 'full')
  def _update_config(self, rel_path, config, api_version=1):
    # PUT the given config dict and return the server's (summary) view of
    # the resulting configuration.
    self._require_min_api_version(api_version)
    resp = self._get_resource_root().put(self._path() + '/' + rel_path,
        data=config_to_json(config))
    return json_to_config(resp, False)
  # The following helpers delegate to _call() for each HTTP verb.
  def _delete(self, rel_path, ret_type, ret_is_list=False, params=None,
      api_version=1):
    return self._call('delete', rel_path, ret_type, ret_is_list, None, params,
        api_version)
  def _get(self, rel_path, ret_type, ret_is_list=False, params=None,
      api_version=1):
    return self._call('get', rel_path, ret_type, ret_is_list, None, params,
        api_version)
  def _post(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
      api_version=1):
    return self._call('post', rel_path, ret_type, ret_is_list, data, params,
        api_version)
  def _put(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
      api_version=1):
    return self._call('put', rel_path, ret_type, ret_is_list, data, params,
        api_version)
  def _call(self, method, rel_path, ret_type, ret_is_list=False, data=None,
      params=None, api_version=1):
    # Resolve the full path and dispatch through the module-level call()
    # using the named HTTP method on the resource root.
    path = self._path()
    if rel_path:
      path += '/' + rel_path
    return call(getattr(self._get_resource_root(), method),
        path,
        ret_type,
        ret_is_list,
        data,
        params,
        api_version)
class ApiList(BaseApiObject):
  """A list of some api object"""
  LIST_KEY = "items"

  def __init__(self, objects, resource_root=None, **attrs):
    BaseApiObject.__init__(self, resource_root, **attrs)
    # Bypass checks in BaseApiObject.__setattr__
    object.__setattr__(self, 'objects', objects)

  def __str__(self):
    return "<ApiList>(%d): [%s]" % (
        len(self.objects),
        ", ".join([str(item) for item in self.objects]))

  def to_json_dict(self, preserve_ro=False):
    # Serialize the declared attributes, then add the member objects under
    # the "items" key.
    ret = BaseApiObject.to_json_dict(self, preserve_ro)
    attr = Attr()
    ret[ApiList.LIST_KEY] = [ attr.to_json(x, preserve_ro) for x in self.objects ]
    return ret

  def __len__(self):
    return self.objects.__len__()

  def __iter__(self):
    return self.objects.__iter__()

  def __getitem__(self, i):
    return self.objects.__getitem__(i)

  def __getslice__(self, i, j):
    # Bug fix: this method was previously named "__getslice" (missing the
    # trailing underscores), so Python 2 slice syntax never dispatched to it.
    return self.objects.__getslice__(i, j)

  @classmethod
  def from_json_dict(cls, dic, resource_root, member_cls=None):
    if not member_cls:
      member_cls = cls._MEMBER_CLASS
    attr = Attr(atype=member_cls)
    items = []
    if ApiList.LIST_KEY in dic:
      items = [ attr.from_json(resource_root, x) for x in dic[ApiList.LIST_KEY] ]
    ret = cls(items)
    # If the class declares custom attributes, populate them based on the input
    # dict. The check avoids extra overhead for the common case, where we just
    # have a plain list. _set_attrs() also does not understand the "items"
    # attribute, so it can't be in the input data.
    if cls._ATTRIBUTES:
      if ApiList.LIST_KEY in dic:
        dic = copy.copy(dic)
        del dic[ApiList.LIST_KEY]
      ret._set_attrs(dic, allow_ro=True)
    return ret
class ApiHostRef(BaseApiObject):
  """Reference to a host by its hostId."""
  _ATTRIBUTES = {
    'hostId' : None,
  }
  def __init__(self, resource_root, hostId=None):
    BaseApiObject.init(self, resource_root, locals())
  def __str__(self):
    return "<ApiHostRef>: %s" % (self.hostId)
class ApiServiceRef(BaseApiObject):
  """Reference to a service by cluster/service name, optionally on a peer."""
  _ATTRIBUTES = {
    'clusterName' : None,
    'serviceName' : None,
    'peerName' : None,
  }
  def __init__(self, resource_root, serviceName=None, clusterName=None,
      peerName=None):
    BaseApiObject.init(self, resource_root, locals())
class ApiClusterRef(BaseApiObject):
  """Reference to a cluster by name."""
  _ATTRIBUTES = {
    'clusterName' : None,
  }
  def __init__(self, resource_root, clusterName = None):
    BaseApiObject.init(self, resource_root, locals())
class ApiRoleRef(BaseApiObject):
  """Reference to a role by cluster, service and role name."""
  _ATTRIBUTES = {
    'clusterName' : None,
    'serviceName' : None,
    'roleName' : None,
  }
  def __init__(self, resource_root, serviceName=None, roleName=None,
      clusterName=None):
    BaseApiObject.init(self, resource_root, locals())
class ApiRoleConfigGroupRef(BaseApiObject):
  """Reference to a role config group by name."""
  _ATTRIBUTES = {
    'roleConfigGroupName' : None,
  }
  def __init__(self, resource_root, roleConfigGroupName=None):
    BaseApiObject.init(self, resource_root, locals())
class ApiCommand(BaseApiObject):
  """A server-side command, with helpers to fetch, wait on, abort and retry it."""
  # Commands that complete synchronously report this sentinel id; such
  # commands cannot be fetched/aborted/retried and the helpers short-circuit.
  SYNCHRONOUS_COMMAND_ID = -1
  @classmethod
  def _get_attributes(cls):
    # Built lazily and cached on the class; has_key is Python 2 syntax.
    if not cls.__dict__.has_key('_ATTRIBUTES'):
      cls._ATTRIBUTES = {
        'id' : ROAttr(),
        'name' : ROAttr(),
        'startTime' : ROAttr(datetime.datetime),
        'endTime' : ROAttr(datetime.datetime),
        'active' : ROAttr(),
        'success' : ROAttr(),
        'resultMessage' : ROAttr(),
        'clusterRef' : ROAttr(ApiClusterRef),
        'serviceRef' : ROAttr(ApiServiceRef),
        'roleRef' : ROAttr(ApiRoleRef),
        'hostRef' : ROAttr(ApiHostRef),
        'children' : ROAttr(ApiCommand, is_api_list=True),
        'parent' : ROAttr(ApiCommand),
        'resultDataUrl' : ROAttr(),
        'canRetry' : ROAttr(),
      }
    return cls._ATTRIBUTES
  def __str__(self):
    return "<ApiCommand>: '%s' (id: %s; active: %s; success: %s)" % (
        self.name, self.id, self.active, self.success)
  def _path(self):
    return '/commands/%d' % self.id
  def fetch(self):
    """
    Retrieve updated data about the command from the server.
    @return: A new ApiCommand object.
    """
    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
      return self
    resp = self._get_resource_root().get(self._path())
    return ApiCommand.from_json_dict(resp, self._get_resource_root())
  def wait(self, timeout=None):
    """
    Wait for command to finish.
    @param timeout: (Optional) Max amount of time (in seconds) to wait. Wait
                    forever by default.
    @return: The final ApiCommand object, containing the last known state.
             The command may still be running in case of timeout.
    """
    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
      return self
    SLEEP_SEC = 5
    if timeout is None:
      deadline = None
    else:
      deadline = time.time() + timeout
    # Poll the server until the command goes inactive or the deadline passes.
    while True:
      cmd = self.fetch()
      if not cmd.active:
        return cmd
      if deadline is not None:
        now = time.time()
        if deadline < now:
          # Timed out: return the latest (possibly still active) state.
          return cmd
        else:
          # Never sleep past the deadline.
          time.sleep(min(SLEEP_SEC, deadline - now))
      else:
        time.sleep(SLEEP_SEC)
  def abort(self):
    """
    Abort a running command.
    @return: A new ApiCommand object with the updated information.
    """
    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
      return self
    path = self._path() + '/abort'
    resp = self._get_resource_root().post(path)
    return ApiCommand.from_json_dict(resp, self._get_resource_root())
  def retry(self):
    """
    Retry a failed or aborted command.
    Note: The retry will only work for ClusterUpgrade command for now.
    @return: A new ApiCommand object with the updated information.
    """
    path = self._path() + '/retry'
    resp = self._get_resource_root().post(path)
    return ApiCommand.from_json_dict(resp, self._get_resource_root())
class ApiBulkCommandList(ApiList):
  """A list of commands plus any per-item submission errors."""
  _ATTRIBUTES = {
    'errors' : ROAttr(),
  }
  _MEMBER_CLASS = ApiCommand
class ApiCommandMetadata(BaseApiObject):
  """Read-only metadata (name and argument schema) for a command type."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'argSchema' : ROAttr(),
  }
  def __init__(self, resource_root):
    BaseApiObject.init(self, resource_root)
  def __str__(self):
    return "<ApiCommandMetadata>: %s (%s)" % (self.name, self.argSchema)
class ApiMetricData(BaseApiObject):
  """Metric reading data."""
  _ATTRIBUTES = {
    'timestamp' : ROAttr(datetime.datetime),
    'value' : ROAttr(),
  }
  def __init__(self, resource_root):
    BaseApiObject.init(self, resource_root)
class ApiMetric(BaseApiObject):
  """Metric information."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'context' : ROAttr(),
    'unit' : ROAttr(),
    'data' : ROAttr(ApiMetricData),
    'displayName' : ROAttr(),
    'description' : ROAttr(),
  }
  def __init__(self, resource_root):
    BaseApiObject.init(self, resource_root)
class ApiActivity(BaseApiObject):
  """Read-only description of an activity (mapper/reducer fields suggest a
  MapReduce-style job)."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'type' : ROAttr(),
    'parent' : ROAttr(),
    'startTime' : ROAttr(),
    'finishTime' : ROAttr(),
    'id' : ROAttr(),
    'status' : ROAttr(),
    'user' : ROAttr(),
    'group' : ROAttr(),
    'inputDir' : ROAttr(),
    'outputDir' : ROAttr(),
    'mapper' : ROAttr(),
    'combiner' : ROAttr(),
    'reducer' : ROAttr(),
    'queueName' : ROAttr(),
    'schedulerPriority' : ROAttr(),
  }
  def __init__(self, resource_root):
    BaseApiObject.init(self, resource_root)
  def __str__(self):
    return "<ApiActivity>: %s (%s)" % (self.name, self.status)
class ApiCmPeer(BaseApiObject):
  """A Cloudera Manager peer definition."""
  _ATTRIBUTES = {
    'name' : None,
    'url' : None,
    'username' : None,
    'password' : None,
    'type' : None,
    'clouderaManagerCreatedUser' : None,
  }
  def __str__(self):
    # NOTE(review): repr says "<ApiPeer>" rather than "<ApiCmPeer>"; it is
    # runtime output, so it is preserved as-is.
    return "<ApiPeer>: %s (%s)" % (self.name, self.url)
class ApiLicensedFeatureUsage(BaseApiObject):
  """Read-only licensed-feature usage, overall and per cluster."""
  _ATTRIBUTES = {
    'totals' : ROAttr(),
    'clusters' : ROAttr(),
  }
class ApiHdfsReplicationArguments(BaseApiObject):
  """Writable arguments for an HDFS replication job."""
  _ATTRIBUTES = {
    'sourceService' : Attr(ApiServiceRef),
    'sourcePath' : None,
    'destinationPath' : None,
    'mapreduceServiceName' : None,
    'userName' : None,
    'numMaps' : None,
    'dryRun' : None,
    'bandwidthPerMap' : None,
    'logPath' : None,
    'schedulerPoolName' : None,
    'abortOnError' : None,
    'preservePermissions' : None,
    'preserveBlockSize' : None,
    'preserveReplicationCount' : None,
    'removeMissingFiles' : None,
    'skipChecksumChecks' : None,
    'skipTrash' : None,
    'replicationStrategy' : None,
    'preserveXAttrs' : None,
    'exclusionFilters' : None,
  }
class ApiHdfsReplicationResult(BaseApiObject):
  """Read-only counters and details for an HDFS replication job."""
  _ATTRIBUTES = {
    'progress' : ROAttr(),
    'counters' : ROAttr(),
    'numBytesDryRun' : ROAttr(),
    'numFilesDryRun' : ROAttr(),
    'numFilesExpected' : ROAttr(),
    'numBytesExpected' : ROAttr(),
    'numFilesCopied' : ROAttr(),
    'numBytesCopied' : ROAttr(),
    'numFilesSkipped' : ROAttr(),
    'numBytesSkipped' : ROAttr(),
    'numFilesDeleted' : ROAttr(),
    'numFilesCopyFailed' : ROAttr(),
    'numBytesCopyFailed' : ROAttr(),
    'setupError' : ROAttr(),
    'jobId' : ROAttr(),
    'jobDetailsUri' : ROAttr(),
    'dryRun' : ROAttr(),
    'snapshottedDirs' : ROAttr(),
    'failedFiles' : ROAttr(),
    'runAsUser' : ROAttr(),
  }
class ApiHiveTable(BaseApiObject):
  """A Hive table identified by database and table name."""
  _ATTRIBUTES = {
    'database' : None,
    'tableName' : None,
  }
  def __str__(self):
    return "<ApiHiveTable>: %s, %s" % (self.database, self.tableName)
class ApiImpalaUDF(BaseApiObject):
  """Read-only reference to an Impala UDF (database plus signature)."""
  _ATTRIBUTES = {
    'database' : ROAttr(),
    'signature' : ROAttr(),
  }
  def __str__(self):
    return "<ApiImpalaUDF>: %s, %s" % (self.database, self.signature)
class ApiHiveReplicationArguments(BaseApiObject):
  """Writable arguments for a Hive replication job."""
  _ATTRIBUTES = {
    'sourceService' : Attr(ApiServiceRef),
    'tableFilters' : Attr(ApiHiveTable),
    'exportDir' : None,
    'force' : None,
    'replicateData' : None,
    'hdfsArguments' : Attr(ApiHdfsReplicationArguments),
    'dryRun' : None,
    'replicateImpalaMetadata' : None,
  }
class ApiHiveReplicationResult(BaseApiObject):
  """Read-only result of a Hive replication job."""
  _ATTRIBUTES = {
    'tableCount' : ROAttr(),
    'tables' : ROAttr(ApiHiveTable),
    'impalaUDFCount' : ROAttr(),
    'impalaUDFs' : ROAttr(ApiImpalaUDF),
    'errorCount' : ROAttr(),
    'errors' : ROAttr(),
    'dataReplicationResult' : ROAttr(ApiHdfsReplicationResult),
    'dryRun' : ROAttr(),
    'runAsUser' : ROAttr(),
    'phase' : ROAttr(),
  }
class ApiReplicationCommand(ApiCommand):
  """An ApiCommand extended with HDFS/Hive replication results."""
  @classmethod
  def _get_attributes(cls):
    # Checking cls.__dict__ directly (rather than hasattr) avoids picking up
    # the parent class's cached attribute dict; has_key is Python 2 syntax.
    if not cls.__dict__.has_key('_ATTRIBUTES'):
      attrs = {
        'hdfsResult' : ROAttr(ApiHdfsReplicationResult),
        'hiveResult' : ROAttr(ApiHiveReplicationResult),
      }
      attrs.update(ApiCommand._get_attributes())
      cls._ATTRIBUTES = attrs
    return cls._ATTRIBUTES
class ApiReplicationSchedule(BaseApiObject):
  """A replication schedule: timing, job arguments, alerting and history."""
  _ATTRIBUTES = {
    'startTime' : Attr(datetime.datetime),
    'endTime' : Attr(datetime.datetime),
    'interval' : None,
    'intervalUnit' : None,
    'paused' : None,
    'hdfsArguments' : Attr(ApiHdfsReplicationArguments),
    'hiveArguments' : Attr(ApiHiveReplicationArguments),
    'alertOnStart' : None,
    'alertOnSuccess' : None,
    'alertOnFail' : None,
    'alertOnAbort' : None,
    'id' : ROAttr(),
    'nextRun' : ROAttr(datetime.datetime),
    'history' : ROAttr(ApiReplicationCommand),
    'active' : None
  }
class ApiHBaseSnapshotPolicyArguments(BaseApiObject):
  """HBase-specific arguments for a snapshot policy."""
  _ATTRIBUTES = {
    'tableRegExps' : None,
    'storage' : None,
  }
class ApiHdfsSnapshotPolicyArguments(BaseApiObject):
  """HDFS-specific arguments for a snapshot policy."""
  _ATTRIBUTES = {
    'pathPatterns' : None,
  }
class ApiHBaseSnapshot(BaseApiObject):
  """A single HBase table snapshot."""
  _ATTRIBUTES = {
    'snapshotName' : None,
    'tableName' : None,
    'creationTime' : ROAttr(datetime.datetime),
    'storage' : None,
  }
class ApiHBaseSnapshotError(BaseApiObject):
  """Read-only error description for one HBase snapshot operation."""
  _ATTRIBUTES = {
    'tableName' : ROAttr(),
    'snapshotName' : ROAttr(),
    'error' : ROAttr(),
    'storage' : ROAttr(),
  }
class ApiHdfsSnapshot(BaseApiObject):
  """A single HDFS path snapshot."""
  _ATTRIBUTES = {
    'path' : None,
    'snapshotName' : None,
    'snapshotPath' : None,
    'creationTime' : ROAttr(datetime.datetime),
  }
class ApiHdfsSnapshotError(BaseApiObject):
  """Read-only error description for one HDFS snapshot operation."""
  _ATTRIBUTES = {
    'path' : ROAttr(),
    'snapshotName' : ROAttr(),
    'snapshotPath' : ROAttr(),
    'error' : ROAttr(),
  }
class ApiHBaseSnapshotResult(BaseApiObject):
  """Read-only counters and details for an HBase snapshot command."""
  _ATTRIBUTES = {
    'processedTableCount' : ROAttr(),
    'processedTables' : ROAttr(),
    'unprocessedTableCount' : ROAttr(),
    'unprocessedTables' : ROAttr(),
    'createdSnapshotCount' : ROAttr(),
    'createdSnapshots' : ROAttr(ApiHBaseSnapshot),
    'deletedSnapshotCount' : ROAttr(),
    'deletedSnapshots' : ROAttr(ApiHBaseSnapshot),
    'creationErrorCount' : ROAttr(),
    'creationErrors' : ROAttr(ApiHBaseSnapshotError),
    'deletionErrorCount' : ROAttr(),
    'deletionErrors' : ROAttr(ApiHBaseSnapshotError),
  }
class ApiHdfsSnapshotResult(BaseApiObject):
  """Read-only counters and details for an HDFS snapshot command."""
  _ATTRIBUTES = {
    'processedPathCount' : ROAttr(),
    'processedPaths' : ROAttr(),
    'unprocessedPathCount' : ROAttr(),
    'unprocessedPaths' : ROAttr(),
    'createdSnapshotCount' : ROAttr(),
    'createdSnapshots' : ROAttr(ApiHdfsSnapshot),
    'deletedSnapshotCount' : ROAttr(),
    'deletedSnapshots' : ROAttr(ApiHdfsSnapshot),
    'creationErrorCount' : ROAttr(),
    'creationErrors' : ROAttr(ApiHdfsSnapshotError),
    'deletionErrorCount' : ROAttr(),
    'deletionErrors' : ROAttr(ApiHdfsSnapshotError),
  }
class ApiSnapshotCommand(BaseApiObject):
  """Command result object carrying HDFS/HBase snapshot results alongside
  the standard ApiCommand attribute set."""
  @classmethod
  def _get_attributes(cls):
    # Lazily merge snapshot-result attributes with the standard ApiCommand
    # attributes and cache on this class; has_key is Python 2 syntax.
    if not cls.__dict__.has_key('_ATTRIBUTES'):
      attrs = {
        'hdfsResult' : ROAttr(ApiHdfsSnapshotResult),
        'hbaseResult' : ROAttr(ApiHBaseSnapshotResult),
      }
      attrs.update(ApiCommand._get_attributes())
      cls._ATTRIBUTES = attrs
    return cls._ATTRIBUTES
class ApiSnapshotPolicy(BaseApiObject):
  """
  @type name: str
  @ivar name: Name of the snapshot policy.
  @type description: str
  @ivar description: Description of the snapshot policy.
  @type hourly_snapshots: int
  @ivar hourly_snapshots: Number of hourly snapshots to be retained (default: 0).
  @type daily_snapshots: int
  @ivar daily_snapshots: Number of daily snapshots to be retained (default: 0).
  @type weekly_snapshots: int
  @ivar weekly_snapshots: Number of weekly snapshots to be retained (default: 0).
  @type monthly_snapshots: int
  @ivar monthly_snapshots: Number of monthly snapshots to be retained (default: 0).
  @type yearly_snapshots: int
  @ivar yearly_snapshots: Number of yearly snapshots to be retained (default: 0).
  @type hours_for_hourly_snapshots: list of int
  @ivar hours_for_hourly_snapshots: Hours of the day that hourly snapshots should be created.
        Valid values are 0 to 23. If this list is empty, then hourly snapshots are
        created for every hour.
  @type minute_of_hour: int
  @ivar minute_of_hour: Minute in the hour that hourly, daily, weekly, monthly and yearly
        snapshots should be created. Valid values are 0 to 59 (default: 0).
  @type hour_of_day: int
  @ivar hour_of_day: Hour in the day that daily, weekly, monthly and yearly snapshots should be created.
        Valid values are 0 to 23 (default: 0).
  @type day_of_week: int
  @ivar day_of_week: Day of the week that weekly snapshots should be created.
        Valid values are 1 to 7, 1 representing Sunday (default: 1).
  @type day_of_month: int
  @ivar day_of_month: Day of the month that monthly and yearly snapshots should be created.
        Values from 1 to 31 are allowed. Additionally 0 to -30 can be used to
        specify offsets from the last day of the month (default: 1).
  @type month_of_year: int
  @ivar month_of_year: Month of the year that yearly snapshots should be created.
        Valid values are 1 to 12, 1 representing January (default: 1).
  @ivar alert_on_start: whether to generate alerts on start of snapshot creation/deletion activity.
  @ivar alert_on_success: whether to generate alerts on successful completion of snapshot creation/deletion activity.
  @ivar alert_on_fail: whether to generate alerts on failure of snapshot creation/deletion activity.
  @ivar alert_on_abort: whether to generate alerts on abort of snapshot creation/deletion activity.
  @ivar paused: whether to run the policy on schedule
  @type hbaseArguments: ApiHBaseSnapshotPolicyArguments
  @ivar hbaseArguments: HBase specific arguments for the replication job.
  @type hdfsArguments: ApiHdfsSnapshotPolicyArguments
  @ivar hdfsArguments: HDFS specific arguments for the replication job.
  """
  # Keys are the JSON wire names (camelCase) corresponding to the
  # snake_case attributes documented above; the two lastCommand fields are
  # server-populated (read-only).
  _ATTRIBUTES = {
    'name'                    : None,
    'description'             : None,
    'hourlySnapshots'         : None,
    'dailySnapshots'          : None,
    'weeklySnapshots'         : None,
    'monthlySnapshots'        : None,
    'yearlySnapshots'         : None,
    'minuteOfHour'            : None,
    'hourOfDay'               : None,
    'dayOfWeek'               : None,
    'dayOfMonth'              : None,
    'monthOfYear'             : None,
    'hoursForHourlySnapshots' : None,
    'alertOnStart'            : None,
    'alertOnSuccess'          : None,
    'alertOnFail'             : None,
    'alertOnAbort'            : None,
    'paused'                  : None,
    'hbaseArguments'          : Attr(ApiHBaseSnapshotPolicyArguments),
    'hdfsArguments'           : Attr(ApiHdfsSnapshotPolicyArguments),
    'lastCommand'             : ROAttr(ApiSnapshotCommand),
    'lastSuccessfulCommand'   : ROAttr(ApiSnapshotCommand),
  }
class ApiBatchRequestElement(BaseApiObject):
  """One element in a batch request."""
  # All fields are writable; presumably method/url/body describe one HTTP
  # sub-request of the batch — confirm against the CM API reference.
  _ATTRIBUTES = {
    'method' : None,
    'url' : None,
    'body' : None,
    'contentType' : None,
    'acceptType' : None,
  }
class ApiBatchResponseElement(BaseApiObject):
  """One element in a batch response."""
  # Read-only: status code and raw response for one batch sub-request.
  _ATTRIBUTES = {
    'statusCode' : ROAttr(),
    'response' : ROAttr(),
  }
class ApiBatchResponseList(ApiList):
  """A list of batch response objects."""
  # 'success' is an overall flag on the list; members are the per-request
  # ApiBatchResponseElement entries.
  _ATTRIBUTES = {
    'success' : ROAttr(),
  }
  _MEMBER_CLASS = ApiBatchResponseElement
class ApiConfig(BaseApiObject):
  """A single configuration parameter: a writable name/value pair plus
  read-only metadata (default, display name, validation state, ...).
  """
  _ATTRIBUTES = {
    'name' : None,
    'value' : None,
    'required' : ROAttr(),
    'default' : ROAttr(),
    'displayName' : ROAttr(),
    'description' : ROAttr(),
    'relatedName' : ROAttr(),
    'validationState' : ROAttr(),
    'validationMessage' : ROAttr(),
    'validationWarningsSuppressed' : ROAttr()
  }

  def __init__(self, resource_root, name=None, value=None):
    # BaseApiObject.init() extracts attributes from locals(), so the
    # parameter names here must match keys of _ATTRIBUTES exactly.
    BaseApiObject.init(self, resource_root, locals())

  def __str__(self):
    return "<ApiConfig>: %s = %s" % (self.name, self.value)
class ApiImpalaQuery(BaseApiObject):
  """A single Impala query as reported by the server (read-only)."""
  _ATTRIBUTES = {
    'queryId' : ROAttr(),
    'queryState' : ROAttr(),
    'queryType' : ROAttr(),
    'statement' : ROAttr(),
    'database' : ROAttr(),
    'rowsProduced' : ROAttr(),
    'coordinator' : ROAttr(ApiHostRef),
    'user' : ROAttr(),
    'startTime' : ROAttr(datetime.datetime),
    'endTime' : ROAttr(datetime.datetime),
    'detailsAvailable' : ROAttr(),
    'attributes' : ROAttr(),
    'durationMillis' : ROAttr()
  }

  def __str__(self):
    return "<ApiImpalaQuery>: %s" % (self.queryId)
class ApiImpalaQueryResponse(BaseApiObject):
  """Response to an Impala query listing: the queries plus any warnings."""
  _ATTRIBUTES = {
    'queries' : ROAttr(ApiImpalaQuery),
    'warnings' : ROAttr()
  }
class ApiImpalaQueryDetailsResponse(BaseApiObject):
  """Wraps the free-form details string returned for one Impala query."""
  _ATTRIBUTES = {
    'details' : ROAttr()
  }

  def __str__(self):
    # Fix: the repr previously misspelled the class name as
    # "AipImpalaQueryDetailsResponse".
    return "<ApiImpalaQueryDetailsResponse> %s" % self.details
class ApiImpalaCancelResponse(BaseApiObject):
  """Response to cancelling an Impala query; carries an optional warning."""
  _ATTRIBUTES = {
    'warning' : ROAttr()
  }

  def __str__(self):
    return "<ApiImpalaCancelResponse> %s" % self.warning
class ApiImpalaQueryAttribute(BaseApiObject):
  """Metadata describing one filterable attribute of Impala queries."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'type' : ROAttr(),
    'displayName' : ROAttr(),
    'supportsHistograms' : ROAttr(),
    'description' : ROAttr()
  }

  def __str__(self):
    # Fix: the original referenced the bare global 'name', which is
    # undefined and raised NameError whenever the object was printed.
    return "<ApiImpalaQueryAttribute> %s" % self.name
class ApiMr2AppInformation(BaseApiObject):
  """MapReduce-2-specific information attached to a YARN application."""
  _ATTRIBUTES = {
    'jobState' : ROAttr()
  }

  def __str__(self):
    return "<ApiMr2AppInformation>: %s" % (self.jobState)
class ApiYarnApplication(BaseApiObject):
  """A single YARN application with its runtime and resource-usage
  metrics (all read-only, server-populated)."""
  _ATTRIBUTES = {
    'applicationId' : ROAttr(),
    'name' : ROAttr(),
    'user' : ROAttr(),
    'startTime' : ROAttr(datetime.datetime),
    'endTime' : ROAttr(datetime.datetime),
    'pool' : ROAttr(),
    'state' : ROAttr(),
    'progress' : ROAttr(),
    'mr2AppInformation' : ROAttr(ApiMr2AppInformation),
    'attributes' : ROAttr(),
    'allocatedMB' : ROAttr(),
    'allocatedVCores' : ROAttr(),
    'runningContainers' : ROAttr(),
    'applicationTags' : ROAttr(),
    'allocatedMemorySeconds' : ROAttr(),
    'allocatedVcoreSeconds' : ROAttr(),
    'containerUsedMemorySeconds' : ROAttr(),
    'containerUsedCpuSeconds' : ROAttr(),
    'containerUsedVcoreSeconds' : ROAttr(),
    'containerAllocatedMemorySeconds' : ROAttr(),
    'containerAllocatedVcoreSeconds' : ROAttr(),
  }

  def __str__(self):
    return "<ApiYarnApplication>: %s" % (self.applicationId)
class ApiYarnApplicationResponse(BaseApiObject):
  """Response to a YARN application listing: applications plus warnings."""
  _ATTRIBUTES = {
    'applications' : ROAttr(ApiYarnApplication),
    'warnings' : ROAttr()
  }
class ApiYarnKillResponse(BaseApiObject):
  """Response to killing a YARN application; carries an optional warning."""
  _ATTRIBUTES = {
    'warning' : ROAttr()
  }

  def __str__(self):
    return "<ApiYarnKillResponse> %s" % self.warning
class ApiYarnApplicationAttribute(BaseApiObject):
  """Metadata describing one filterable attribute of YARN applications."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'type' : ROAttr(),
    'displayName' : ROAttr(),
    'supportsHistograms' : ROAttr(),
    'description' : ROAttr()
  }

  def __str__(self):
    # Fix: the original referenced the bare global 'name', which is
    # undefined and raised NameError whenever the object was printed.
    return "<ApiYarnApplicationAttribute> %s" % self.name
class ApiTimeSeriesRequest(BaseApiObject):
  """Parameters of a time-series query ('from'/'to' are wire names that
  are not valid Python identifiers, hence the plain-dict declaration)."""
  _ATTRIBUTES = {
    'query' : None,
    'from' : None,
    'to' : None,
    'contentType' : None,
    'desiredRollup' : None,
    'mustUseDesiredRollup' : None
  }

  def __str__(self):
    return "<ApiTimeSeriesRequest>: %s" % (self.query)
class ApiProductVersion(BaseApiObject):
  """A (product, version) pair used in cluster templates."""
  _ATTRIBUTES = {
    'version' : None,
    'product' : None,
  }
class ApiClusterTemplateConfig(BaseApiObject):
  """One config entry in a cluster template; 'variable' names a template
  variable to be substituted at instantiation time."""
  _ATTRIBUTES = {
    'name' : None,
    'value' : None,
    'ref' : None,
    'variable' : None,
    'autoConfig' : None,
  }
class ApiClusterTemplateRoleConfigGroup(BaseApiObject):
  """A role config group inside a cluster template."""
  _ATTRIBUTES = {
    'refName' : None,
    'roleType' : None,
    'base' : None,
    'displayName' : None,
    'configs' : Attr(ApiClusterTemplateConfig),
  }
class ApiClusterTemplateRole(BaseApiObject):
  """A role reference (name + type) inside a cluster template service."""
  _ATTRIBUTES = {
    'refName' : None,
    'roleType' : None,
  }
class ApiClusterTemplateHostTemplate(BaseApiObject):
  """A host template: which role config groups run on hosts of this kind."""
  _ATTRIBUTES = {
    'refName' : None,
    'cardinality' : None,
    'roleConfigGroupsRefNames' : None,
  }
class ApiClusterTemplateHostInfo(BaseApiObject):
  """Maps a concrete host (or host-name range) to a host template when a
  cluster template is instantiated."""
  _ATTRIBUTES = {
    'hostName' : None,
    'hostNameRange' : None,
    'rackId' : None,
    'hostTemplateRefName' : None,
    'roleRefNames' : None,
  }
class ApiClusterTemplateVariable(BaseApiObject):
  """A (name, value) binding for a template variable at instantiation."""
  _ATTRIBUTES = {
    'name' : None,
    'value' : None,
  }
class ApiClusterTemplateRoleConfigGroupInfo(BaseApiObject):
  """Renames a templated role config group at instantiation time."""
  _ATTRIBUTES = {
    'rcgRefName' : None,
    'name' : None,
  }
class ApiClusterTemplateInstantiator(BaseApiObject):
  """Everything needed to turn a cluster template into a real cluster:
  target cluster name, host assignments and variable bindings."""
  _ATTRIBUTES = {
    'clusterName' : None,
    'hosts' : Attr(ApiClusterTemplateHostInfo),
    'variables' : Attr(ApiClusterTemplateVariable),
    'roleConfigGroups' : Attr(ApiClusterTemplateRoleConfigGroupInfo),
  }
class ApiClusterTemplateService(BaseApiObject):
  """One service in a cluster template, with its configs, role config
  groups and roles."""
  _ATTRIBUTES = {
    'refName' : None,
    'serviceType' : None,
    'serviceConfigs' : Attr(ApiClusterTemplateConfig),
    'roleConfigGroups' : Attr(ApiClusterTemplateRoleConfigGroup),
    'displayName' : None,
    'roles' : Attr(ApiClusterTemplateRole),
  }
class ApiClusterTemplate(BaseApiObject):
  """Top-level cluster template: CDH/CM versions plus the services, host
  templates and instantiator needed to materialize a cluster."""
  _ATTRIBUTES = {
    'cdhVersion' : None,
    'displayName' : None,
    'cmVersion' : None,
    # Consistency: was double-quoted "repositories", unlike every other
    # key in this module (identical string value either way).
    'repositories' : None,
    'products' : Attr(ApiProductVersion),
    'services' : Attr(ApiClusterTemplateService),
    'hostTemplates' : Attr(ApiClusterTemplateHostTemplate),
    'instantiator' : Attr(ApiClusterTemplateInstantiator),
  }
def config_to_api_list(dic):
  """
  Converts a python dictionary into a list containing the proper
  ApiConfig encoding for configuration data.

  @param dic: Key-value pairs to convert.
  @return: JSON dictionary of an ApiConfig list (*not* an ApiList).
  """
  # dict.iteritems() exists only on Python 2; items() does the same job
  # here on both Python 2 (list) and Python 3 (view).
  config = [{'name' : k, 'value': v} for k, v in dic.items()]
  return { ApiList.LIST_KEY : config }
def config_to_json(dic):
  """
  Converts a python dictionary into a JSON payload.

  The payload matches the expected "apiConfig list" type used to update
  configuration parameters using the API.

  @param dic: Key-value pairs to convert.
  @return: String with the JSON-encoded data.
  """
  # Encode through the canonical ApiConfig-list representation.
  api_list = config_to_api_list(dic)
  return json.dumps(api_list)
def json_to_config(dic, full = False):
  """
  Converts a JSON-decoded config dictionary to a python dictionary.

  When materializing the full view, the values in the dictionary will be
  instances of ApiConfig, instead of strings.

  @param dic: JSON-decoded config dictionary.
  @param full: Whether to materialize the full view of the config data.
  @return: Python dictionary with config data.
  """
  entries = dic['items']
  if full:
    # Full view: keep each entry as an ApiConfig object.
    return dict((e['name'], ApiConfig.from_json_dict(e, None))
                for e in entries)
  # Summary view: just the raw values (None when 'value' is absent).
  return dict((e['name'], e.get('value')) for e in entries)
| false | true |
f71ac1809f6473acb6bd2afca69ff45e16538c2b | 12,263 | py | Python | tests/python/unittest/test_gluon_rnn.py | ymaxgit/mxnet | 01ae629c6593e0352fd30979bccd0196854ef882 | [
"Apache-2.0"
] | 1 | 2022-03-03T18:36:42.000Z | 2022-03-03T18:36:42.000Z | tests/python/unittest/test_gluon_rnn.py | ymaxgit/mxnet | 01ae629c6593e0352fd30979bccd0196854ef882 | [
"Apache-2.0"
] | 1 | 2022-02-28T21:23:12.000Z | 2022-03-03T18:33:42.000Z | tests/python/unittest/test_gluon_rnn.py | ymaxgit/mxnet | 01ae629c6593e0352fd30979bccd0196854ef882 | [
"Apache-2.0"
] | 1 | 2022-03-03T18:36:37.000Z | 2022-03-03T18:36:37.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet import gluon
import numpy as np
from numpy.testing import assert_allclose
import unittest
from mxnet.test_utils import almost_equal
def test_rnn():
    """RNNCell: parameter names, unrolled output names, inferred shapes."""
    rnn_cell = gluon.rnn.RNNCell(100, prefix='rnn_')
    data_syms = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    step_outputs, _ = rnn_cell.unroll(3, data_syms)
    grouped = mx.sym.Group(step_outputs)

    expected_params = ['rnn_h2h_bias', 'rnn_h2h_weight',
                       'rnn_i2h_bias', 'rnn_i2h_weight']
    assert sorted(rnn_cell.collect_params().keys()) == expected_params
    assert grouped.list_outputs() == ['rnn_t%d_out_output' % t for t in range(3)]

    shapes = dict(('rnn_t%d_data' % t, (10, 50)) for t in range(3))
    _, out_shapes, _ = grouped.infer_shape(**shapes)
    assert out_shapes == [(10, 100)] * 3
def test_lstm():
    """LSTMCell: parameter names, unrolled output names, inferred shapes."""
    lstm_cell = gluon.rnn.LSTMCell(100, prefix='rnn_')
    data_syms = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    step_outputs, _ = lstm_cell.unroll(3, data_syms)
    grouped = mx.sym.Group(step_outputs)

    assert sorted(lstm_cell.collect_params().keys()) == [
        'rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
    assert grouped.list_outputs() == ['rnn_t%d_out_output' % t for t in range(3)]

    shapes = dict(('rnn_t%d_data' % t, (10, 50)) for t in range(3))
    _, out_shapes, _ = grouped.infer_shape(**shapes)
    assert out_shapes == [(10, 100)] * 3
def test_lstm_forget_bias():
    """LSTMBias initializer must set only the forget-gate slice of i2h_bias."""
    forget_bias = 2.0
    stack = gluon.rnn.SequentialRNNCell()
    stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l0_'))
    stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l1_'))

    dshape = (32, 1, 200)
    data = mx.sym.Variable('data')

    sym, _ = stack.unroll(1, data, merge_outputs=True)
    mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))
    mod.bind(data_shapes=[('data', dshape)], label_shapes=None)

    mod.init_params()

    # Grab either layer's i2h_bias; both were built with the same initializer.
    bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))
    # Expected layout: only the second 100-wide block (the forget gate)
    # carries forget_bias; the remaining three gate blocks stay zero.
    expected_bias = np.hstack([np.zeros((100,)),
                               forget_bias * np.ones(100, ), np.zeros((2 * 100,))])
    assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias)
def test_gru():
    """GRUCell: parameter names, unrolled output names, inferred shapes."""
    gru_cell = gluon.rnn.GRUCell(100, prefix='rnn_')
    data_syms = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    step_outputs, _ = gru_cell.unroll(3, data_syms)
    grouped = mx.sym.Group(step_outputs)

    assert sorted(gru_cell.collect_params().keys()) == [
        'rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
    assert grouped.list_outputs() == ['rnn_t%d_out_output' % t for t in range(3)]

    shapes = dict(('rnn_t%d_data' % t, (10, 50)) for t in range(3))
    _, out_shapes, _ = grouped.infer_shape(**shapes)
    assert out_shapes == [(10, 100)] * 3
def test_residual():
    """ResidualCell(GRU): with all weights zeroed the inner cell contributes
    nothing, so the residual connection must reproduce the inputs exactly."""
    cell = gluon.rnn.ResidualCell(gluon.rnn.GRUCell(50, prefix='rnn_'))
    inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
    outputs, _ = cell.unroll(2, inputs)
    outputs = mx.sym.Group(outputs)
    assert sorted(cell.collect_params().keys()) == \
           ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
    # Output-name check disabled; the names no longer match this pattern.
    # assert outputs.list_outputs() == \
    #        ['rnn_t0_out_plus_residual_output', 'rnn_t1_out_plus_residual_output']

    args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
    assert outs == [(10, 50), (10, 50)]
    # Zero all GRU parameters so the cell output is identically zero and
    # only the residual path remains.
    outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50)),
                           rnn_t1_data=mx.nd.ones((10, 50)),
                           rnn_i2h_weight=mx.nd.zeros((150, 50)),
                           rnn_i2h_bias=mx.nd.zeros((150,)),
                           rnn_h2h_weight=mx.nd.zeros((150, 50)),
                           rnn_h2h_bias=mx.nd.zeros((150,)))
    expected_outputs = np.ones((10, 50))
    assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
    assert np.array_equal(outputs[1].asnumpy(), expected_outputs)
def test_residual_bidirectional():
    """ResidualCell around a BidirectionalCell of two 25-unit GRUs: with all
    weights zeroed, the concatenated (25+25) output is zero and the residual
    path must reproduce the 50-wide inputs exactly."""
    cell = gluon.rnn.ResidualCell(
            gluon.rnn.BidirectionalCell(
                gluon.rnn.GRUCell(25, prefix='rnn_l_'),
                gluon.rnn.GRUCell(25, prefix='rnn_r_')))

    inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
    outputs, _ = cell.unroll(2, inputs, merge_outputs=False)
    outputs = mx.sym.Group(outputs)
    assert sorted(cell.collect_params().keys()) == \
           ['rnn_l_h2h_bias', 'rnn_l_h2h_weight', 'rnn_l_i2h_bias', 'rnn_l_i2h_weight',
            'rnn_r_h2h_bias', 'rnn_r_h2h_weight', 'rnn_r_i2h_bias', 'rnn_r_i2h_weight']
    # Output-name check disabled; the names no longer match this pattern.
    # assert outputs.list_outputs() == \
    #        ['bi_t0_plus_residual_output', 'bi_t1_plus_residual_output']

    args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
    assert outs == [(10, 50), (10, 50)]
    # Zero both GRUs' parameters so only the residual path remains.
    outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50))+5,
                           rnn_t1_data=mx.nd.ones((10, 50))+5,
                           rnn_l_i2h_weight=mx.nd.zeros((75, 50)),
                           rnn_l_i2h_bias=mx.nd.zeros((75,)),
                           rnn_l_h2h_weight=mx.nd.zeros((75, 25)),
                           rnn_l_h2h_bias=mx.nd.zeros((75,)),
                           rnn_r_i2h_weight=mx.nd.zeros((75, 50)),
                           rnn_r_i2h_bias=mx.nd.zeros((75,)),
                           rnn_r_h2h_weight=mx.nd.zeros((75, 25)),
                           rnn_r_h2h_bias=mx.nd.zeros((75,)))
    expected_outputs = np.ones((10, 50))+5
    assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
    assert np.array_equal(outputs[1].asnumpy(), expected_outputs)
def test_stack():
    """SequentialRNNCell of five LSTM layers, layer 1 wrapped in a ResidualCell."""
    stack = gluon.rnn.SequentialRNNCell()
    for layer in range(5):
        lstm = gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_' % layer)
        stack.add(gluon.rnn.ResidualCell(lstm) if layer == 1 else lstm)

    data_syms = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    step_outputs, _ = stack.unroll(3, data_syms)
    grouped = mx.sym.Group(step_outputs)

    param_names = sorted(stack.collect_params().keys())
    for layer in range(5):
        for suffix in ('h2h_weight', 'h2h_bias', 'i2h_weight', 'i2h_bias'):
            assert 'rnn_stack%d_%s' % (layer, suffix) in param_names
    # Only the last layer's outputs are exposed by the stack.
    assert grouped.list_outputs() == \
        ['rnn_stack4_t%d_out_output' % t for t in range(3)]

    shapes = dict(('rnn_t%d_data' % t, (10, 50)) for t in range(3))
    _, out_shapes, _ = grouped.infer_shape(**shapes)
    assert out_shapes == [(10, 100)] * 3
def test_bidirectional():
    """BidirectionalCell concatenates l/r outputs: 100 + 100 -> 200 units."""
    bi_cell = gluon.rnn.BidirectionalCell(
        gluon.rnn.LSTMCell(100, prefix='rnn_l0_'),
        gluon.rnn.LSTMCell(100, prefix='rnn_r0_'),
        output_prefix='rnn_bi_')
    data_syms = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    step_outputs, _ = bi_cell.unroll(3, data_syms)
    grouped = mx.sym.Group(step_outputs)

    assert grouped.list_outputs() == ['rnn_bi_t%d_output' % t for t in range(3)]

    shapes = dict(('rnn_t%d_data' % t, (10, 50)) for t in range(3))
    _, out_shapes, _ = grouped.infer_shape(**shapes)
    assert out_shapes == [(10, 200)] * 3
def test_zoneout():
    """ZoneoutCell wrapper must preserve the wrapped RNNCell's output shape."""
    zcell = gluon.rnn.ZoneoutCell(gluon.rnn.RNNCell(100, prefix='rnn_'),
                                  zoneout_outputs=0.5, zoneout_states=0.5)
    data_syms = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    step_outputs, _ = zcell.unroll(3, data_syms)
    grouped = mx.sym.Group(step_outputs)

    shapes = dict(('rnn_t%d_data' % t, (10, 50)) for t in range(3))
    _, out_shapes, _ = grouped.infer_shape(**shapes)
    assert out_shapes == [(10, 100)] * 3
def check_rnn_forward(layer, inputs, deterministic=True):
    """Unroll ``layer`` imperatively, then hybridized, and compare.

    Runs forward + backward once in imperative mode, hybridizes the cell and
    repeats; when ``deterministic`` is True the outputs and input gradients
    of the two passes must agree (stochastic cells such as dropout/zoneout
    pass False to skip the comparison).
    """
    inputs.attach_grad()
    layer.collect_params().initialize()
    with mx.autograd.record():
        # Exercise both unroll code paths: per-step list outputs and a
        # single merged output.
        out = layer.unroll(3, inputs, merge_outputs=False)[0]
        mx.autograd.backward(out)
        out = layer.unroll(3, inputs, merge_outputs=True)[0]
        out.backward()

    np_out = out.asnumpy()
    np_dx = inputs.grad.asnumpy()

    layer.hybridize()

    with mx.autograd.record():
        out = layer.unroll(3, inputs, merge_outputs=False)[0]
        mx.autograd.backward(out)
        out = layer.unroll(3, inputs, merge_outputs=True)[0]
        out.backward()

    if deterministic:
        mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)
        mx.test_utils.assert_almost_equal(np_dx, inputs.grad.asnumpy(), rtol=1e-3, atol=1e-5)
def test_rnn_cells():
    """Imperative-vs-hybridized consistency for each basic cell and wrapper."""
    for make_cell in (gluon.rnn.LSTMCell, gluon.rnn.RNNCell, gluon.rnn.GRUCell):
        check_rnn_forward(make_cell(100, input_size=200), mx.nd.ones((8, 3, 200)))

    bilayer = gluon.rnn.BidirectionalCell(
        gluon.rnn.LSTMCell(100, input_size=200),
        gluon.rnn.LSTMCell(100, input_size=200))
    check_rnn_forward(bilayer, mx.nd.ones((8, 3, 200)))

    # Dropout and zoneout are stochastic, so skip the determinism check.
    check_rnn_forward(gluon.rnn.DropoutCell(0.5), mx.nd.ones((8, 3, 200)), False)
    zoned = gluon.rnn.ZoneoutCell(gluon.rnn.LSTMCell(100, input_size=200),
                                  0.5, 0.2)
    check_rnn_forward(zoned, mx.nd.ones((8, 3, 200)), False)

    stacked = gluon.rnn.SequentialRNNCell()
    stacked.add(gluon.rnn.LSTMCell(100, input_size=200))
    stacked.add(gluon.rnn.RNNCell(100, input_size=100))
    stacked.add(gluon.rnn.GRUCell(100, input_size=100))
    check_rnn_forward(stacked, mx.nd.ones((8, 3, 200)))
def check_rnn_layer_forward(layer, inputs, states=None):
    """Run a gluon RNN *layer* imperatively and hybridized and compare.

    When ``states`` is given the layer must return (output, new_states);
    otherwise a single NDArray.  Outputs and input gradients from the
    imperative and hybridized passes must match.
    """
    layer.collect_params().initialize()
    inputs.attach_grad()
    with mx.autograd.record():
        out = layer(inputs, states)
        if states is not None:
            assert isinstance(out, tuple) and len(out) == 2
            out = out[0]
        else:
            assert isinstance(out, mx.nd.NDArray)
        out.backward()

    np_out = out.asnumpy()
    np_dx = inputs.grad.asnumpy()

    layer.hybridize()

    with mx.autograd.record():
        out = layer(inputs, states)
        if states is not None:
            assert isinstance(out, tuple) and len(out) == 2
            out = out[0]
        else:
            assert isinstance(out, mx.nd.NDArray)
        out.backward()

    mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)
    mx.test_utils.assert_almost_equal(np_dx, inputs.grad.asnumpy(), rtol=1e-3, atol=1e-5)
def test_rnn_layers():
    """Forward/backward checks for RNN/LSTM/GRU layers, with and without
    explicit initial states, plus a small hybrid net as a training smoke test."""
    check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)))
    check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))
    check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)))
    # LSTM state is a [hidden, cell] pair.
    check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)), [mx.nd.ones((2, 3, 10)), mx.nd.ones((2, 3, 10))])
    check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)))
    check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))

    # Smoke test: bidirectional LSTM feeding BatchNorm/Flatten/Dense must
    # support a full forward + backward pass.
    net = gluon.nn.Sequential()
    net.add(gluon.rnn.LSTM(10, 2, bidirectional=True))
    net.add(gluon.nn.BatchNorm(axis=2))
    net.add(gluon.nn.Flatten())
    net.add(gluon.nn.Dense(3, activation='relu'))
    net.collect_params().initialize()
    with mx.autograd.record():
        net(mx.nd.ones((2, 3, 10))).backward()
if __name__ == '__main__':
    # Run every test_* function in this module via nose when executed
    # directly (e.g. `python test_gluon_rnn.py`).
    import nose
    nose.runmodule()
| 43.640569 | 124 | 0.643562 |
import mxnet as mx
from mxnet import gluon
import numpy as np
from numpy.testing import assert_allclose
import unittest
from mxnet.test_utils import almost_equal
def test_rnn():
cell = gluon.rnn.RNNCell(100, prefix='rnn_')
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def test_lstm():
cell = gluon.rnn.LSTMCell(100, prefix='rnn_')
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def test_lstm_forget_bias():
forget_bias = 2.0
stack = gluon.rnn.SequentialRNNCell()
stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l0_'))
stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l1_'))
dshape = (32, 1, 200)
data = mx.sym.Variable('data')
sym, _ = stack.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))
expected_bias = np.hstack([np.zeros((100,)),
forget_bias * np.ones(100, ), np.zeros((2 * 100,))])
assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias)
def test_gru():
cell = gluon.rnn.GRUCell(100, prefix='rnn_')
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def test_residual():
cell = gluon.rnn.ResidualCell(gluon.rnn.GRUCell(50, prefix='rnn_'))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == \
['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 50), (10, 50)]
outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50)),
rnn_t1_data=mx.nd.ones((10, 50)),
rnn_i2h_weight=mx.nd.zeros((150, 50)),
rnn_i2h_bias=mx.nd.zeros((150,)),
rnn_h2h_weight=mx.nd.zeros((150, 50)),
rnn_h2h_bias=mx.nd.zeros((150,)))
expected_outputs = np.ones((10, 50))
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
assert np.array_equal(outputs[1].asnumpy(), expected_outputs)
def test_residual_bidirectional():
cell = gluon.rnn.ResidualCell(
gluon.rnn.BidirectionalCell(
gluon.rnn.GRUCell(25, prefix='rnn_l_'),
gluon.rnn.GRUCell(25, prefix='rnn_r_')))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=False)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == \
['rnn_l_h2h_bias', 'rnn_l_h2h_weight', 'rnn_l_i2h_bias', 'rnn_l_i2h_weight',
'rnn_r_h2h_bias', 'rnn_r_h2h_weight', 'rnn_r_i2h_bias', 'rnn_r_i2h_weight']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 50), (10, 50)]
outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50))+5,
rnn_t1_data=mx.nd.ones((10, 50))+5,
rnn_l_i2h_weight=mx.nd.zeros((75, 50)),
rnn_l_i2h_bias=mx.nd.zeros((75,)),
rnn_l_h2h_weight=mx.nd.zeros((75, 25)),
rnn_l_h2h_bias=mx.nd.zeros((75,)),
rnn_r_i2h_weight=mx.nd.zeros((75, 50)),
rnn_r_i2h_bias=mx.nd.zeros((75,)),
rnn_r_h2h_weight=mx.nd.zeros((75, 25)),
rnn_r_h2h_bias=mx.nd.zeros((75,)))
expected_outputs = np.ones((10, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
assert np.array_equal(outputs[1].asnumpy(), expected_outputs)
def test_stack():
cell = gluon.rnn.SequentialRNNCell()
for i in range(5):
if i == 1:
cell.add(gluon.rnn.ResidualCell(gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_' % i)))
else:
cell.add(gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_'%i))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
keys = sorted(cell.collect_params().keys())
for i in range(5):
assert 'rnn_stack%d_h2h_weight'%i in keys
assert 'rnn_stack%d_h2h_bias'%i in keys
assert 'rnn_stack%d_i2h_weight'%i in keys
assert 'rnn_stack%d_i2h_bias'%i in keys
assert outputs.list_outputs() == ['rnn_stack4_t0_out_output', 'rnn_stack4_t1_out_output', 'rnn_stack4_t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def test_bidirectional():
cell = gluon.rnn.BidirectionalCell(
gluon.rnn.LSTMCell(100, prefix='rnn_l0_'),
gluon.rnn.LSTMCell(100, prefix='rnn_r0_'),
output_prefix='rnn_bi_')
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
assert outputs.list_outputs() == ['rnn_bi_t0_output', 'rnn_bi_t1_output', 'rnn_bi_t2_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 200), (10, 200), (10, 200)]
def test_zoneout():
cell = gluon.rnn.ZoneoutCell(gluon.rnn.RNNCell(100, prefix='rnn_'), zoneout_outputs=0.5,
zoneout_states=0.5)
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def check_rnn_forward(layer, inputs, deterministic=True):
inputs.attach_grad()
layer.collect_params().initialize()
with mx.autograd.record():
out = layer.unroll(3, inputs, merge_outputs=False)[0]
mx.autograd.backward(out)
out = layer.unroll(3, inputs, merge_outputs=True)[0]
out.backward()
np_out = out.asnumpy()
np_dx = inputs.grad.asnumpy()
layer.hybridize()
with mx.autograd.record():
out = layer.unroll(3, inputs, merge_outputs=False)[0]
mx.autograd.backward(out)
out = layer.unroll(3, inputs, merge_outputs=True)[0]
out.backward()
if deterministic:
mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)
mx.test_utils.assert_almost_equal(np_dx, inputs.grad.asnumpy(), rtol=1e-3, atol=1e-5)
def test_rnn_cells():
check_rnn_forward(gluon.rnn.LSTMCell(100, input_size=200), mx.nd.ones((8, 3, 200)))
check_rnn_forward(gluon.rnn.RNNCell(100, input_size=200), mx.nd.ones((8, 3, 200)))
check_rnn_forward(gluon.rnn.GRUCell(100, input_size=200), mx.nd.ones((8, 3, 200)))
bilayer = gluon.rnn.BidirectionalCell(gluon.rnn.LSTMCell(100, input_size=200),
gluon.rnn.LSTMCell(100, input_size=200))
check_rnn_forward(bilayer, mx.nd.ones((8, 3, 200)))
check_rnn_forward(gluon.rnn.DropoutCell(0.5), mx.nd.ones((8, 3, 200)), False)
check_rnn_forward(gluon.rnn.ZoneoutCell(gluon.rnn.LSTMCell(100, input_size=200),
0.5, 0.2),
mx.nd.ones((8, 3, 200)), False)
net = gluon.rnn.SequentialRNNCell()
net.add(gluon.rnn.LSTMCell(100, input_size=200))
net.add(gluon.rnn.RNNCell(100, input_size=100))
net.add(gluon.rnn.GRUCell(100, input_size=100))
check_rnn_forward(net, mx.nd.ones((8, 3, 200)))
def check_rnn_layer_forward(layer, inputs, states=None):
layer.collect_params().initialize()
inputs.attach_grad()
with mx.autograd.record():
out = layer(inputs, states)
if states is not None:
assert isinstance(out, tuple) and len(out) == 2
out = out[0]
else:
assert isinstance(out, mx.nd.NDArray)
out.backward()
np_out = out.asnumpy()
np_dx = inputs.grad.asnumpy()
layer.hybridize()
with mx.autograd.record():
out = layer(inputs, states)
if states is not None:
assert isinstance(out, tuple) and len(out) == 2
out = out[0]
else:
assert isinstance(out, mx.nd.NDArray)
out.backward()
mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)
mx.test_utils.assert_almost_equal(np_dx, inputs.grad.asnumpy(), rtol=1e-3, atol=1e-5)
def test_rnn_layers():
check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)))
check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)))
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)), [mx.nd.ones((2, 3, 10)), mx.nd.ones((2, 3, 10))])
check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)))
check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))
net = gluon.nn.Sequential()
net.add(gluon.rnn.LSTM(10, 2, bidirectional=True))
net.add(gluon.nn.BatchNorm(axis=2))
net.add(gluon.nn.Flatten())
net.add(gluon.nn.Dense(3, activation='relu'))
net.collect_params().initialize()
with mx.autograd.record():
net(mx.nd.ones((2, 3, 10))).backward()
if __name__ == '__main__':
import nose
nose.runmodule()
| true | true |
f71ac220110425c4090ee4f6700cf2ea38162317 | 2,372 | py | Python | foxlink/me_zrl_bound_evolvers.py | lamsoa729/FoXlink | 3c061b02968cdab1def752d5c145a6df4615504b | [
"BSD-3-Clause"
] | null | null | null | foxlink/me_zrl_bound_evolvers.py | lamsoa729/FoXlink | 3c061b02968cdab1def752d5c145a6df4615504b | [
"BSD-3-Clause"
] | null | null | null | foxlink/me_zrl_bound_evolvers.py | lamsoa729/FoXlink | 3c061b02968cdab1def752d5c145a6df4615504b | [
"BSD-3-Clause"
] | 2 | 2019-06-18T16:48:03.000Z | 2019-06-20T23:50:02.000Z | #!/usr/bin/env python
"""@package docstring
File: me_zrl_bound_evolvers.py
Author: Adam Lamson
Email: adam.lamson@colorado.edu
Description:
"""
import numpy as np
# from scipy.integrate import dblquad
from .me_helpers import dr_dt, convert_sol_to_geom
from .me_zrl_odes import (rod_geom_derivs_zrl, calc_moment_derivs_zrl,
calc_moment_derivs_zrl_B_terms,
calc_boundary_derivs_zrl)
from .me_zrl_helpers import (avg_force_zrl,
prep_zrl_bound_evolver,
get_zrl_moments_and_boundary_terms)
from .rod_steric_forces import calc_wca_force_torque
from .me_zrl_evolvers import prep_zrl_evolver
def evolver_zrl_bound(sol, fric_coeff, params):
    """!Calculate all time derivatives necessary to solve the moment expansion
    evolution of the Fokker-Planck equation of zero rest length (zrl) crosslinkers
    bound to moving rods. d<var> is the time derivative of corresponding
    variable

    @param sol: Solution vector to solve_ivp
    @param fric_coeff: friction coefficients of rod
    @param params: Constant parameters of the simulation
    @return: Time-derivatives of all time varying quantities in a flattened
             array

    """
    # Crosslinker spring constant.  (Removed unused locals hL_i/hL_j, which
    # were computed but never read.)
    ks = params['ks']

    r_i, r_j, u_i, u_j = convert_sol_to_geom(sol)
    r_ij = r_j - r_i  # vector from the center of rod i to rod j

    (scalar_geom, q_arr, Q_arr) = prep_zrl_bound_evolver(sol, params)
    (mu_kl, B_terms) = get_zrl_moments_and_boundary_terms(sol)
    # Clamp moments that must stay non-negative against small negative
    # values produced by integration error.
    if mu_kl[0] < 0.:
        mu_kl[0] = 0.
    if mu_kl[4] < 0.:
        mu_kl[4] = 0.
    if mu_kl[5] < 0.:
        mu_kl[5] = 0.

    # Get average force of crosslinkers on rod2
    f_ij = avg_force_zrl(r_ij, u_i, u_j, mu_kl[0], mu_kl[1], mu_kl[2], ks)
    # Evolution of rod positions
    dgeom = rod_geom_derivs_zrl(f_ij, r_ij, u_i, u_j, scalar_geom,
                                mu_kl, fric_coeff, ks)

    # Evolution of moments
    dmu_kl = calc_moment_derivs_zrl_B_terms(mu_kl, scalar_geom,
                                            q_arr, B_terms, params)

    # Evolution of boundary conditions
    dB_terms = calc_boundary_derivs_zrl(B_terms, scalar_geom, Q_arr, params)
    # Bug fix: np.concatenate takes a single sequence of arrays; the old
    # call np.concatenate(dgeom, dmu_kl, dB_terms) passed dmu_kl as the
    # 'axis' argument and raised a TypeError at runtime.
    dsol = np.concatenate((dgeom, dmu_kl, dB_terms))
    return dsol
##########################################
| 35.939394 | 82 | 0.660624 |
import numpy as np
from .me_helpers import dr_dt, convert_sol_to_geom
from .me_zrl_odes import (rod_geom_derivs_zrl, calc_moment_derivs_zrl,
calc_moment_derivs_zrl_B_terms,
calc_boundary_derivs_zrl)
from .me_zrl_helpers import (avg_force_zrl,
prep_zrl_bound_evolver,
get_zrl_moments_and_boundary_terms)
from .rod_steric_forces import calc_wca_force_torque
from .me_zrl_evolvers import prep_zrl_evolver
def evolver_zrl_bound(sol, fric_coeff, params):
    """Time derivatives of rod geometry, moments, and boundary terms for the
    zero-rest-length bound-crosslinker moment expansion.

    @param sol: Solution vector to solve_ivp
    @param fric_coeff: friction coefficients of rod
    @param params: Constant parameters of the simulation
    @return: Flattened array of time derivatives
    """
    ks = params['ks']
    r_i, r_j, u_i, u_j = convert_sol_to_geom(sol)
    r_ij = r_j - r_i
    (scalar_geom, q_arr, Q_arr) = prep_zrl_bound_evolver(sol, params)
    (mu_kl, B_terms) = get_zrl_moments_and_boundary_terms(sol)
    # Clamp numerically negative moments that must stay non-negative.
    for idx in (0, 4, 5):
        if mu_kl[idx] < 0.:
            mu_kl[idx] = 0.
    f_ij = avg_force_zrl(r_ij, u_i, u_j, mu_kl[0], mu_kl[1], mu_kl[2], ks)
    dgeom = rod_geom_derivs_zrl(f_ij, r_ij, u_i, u_j, scalar_geom,
                                mu_kl, fric_coeff, ks)
    dmu_kl = calc_moment_derivs_zrl_B_terms(mu_kl, scalar_geom,
                                            q_arr, B_terms, params)
    dB_terms = calc_boundary_derivs_zrl(B_terms, scalar_geom, Q_arr, params)
    # BUGFIX: np.concatenate requires one sequence of arrays; separate
    # positional args made dmu_kl the `axis` argument (TypeError).
    dsol = np.concatenate((dgeom, dmu_kl, dB_terms))
    return dsol
return dsol
| true | true |
f71ac5ae55c84dae849e3d0cc87c208a05d7bfcc | 264 | py | Python | antipetros_discordbot/engine/replacements/command_replacements/__init__.py | official-antistasi-community/Antipetros_Discord_Bot | 1b5c8b61c09e61cdff671e259f0478d343a50c8d | [
"MIT"
] | null | null | null | antipetros_discordbot/engine/replacements/command_replacements/__init__.py | official-antistasi-community/Antipetros_Discord_Bot | 1b5c8b61c09e61cdff671e259f0478d343a50c8d | [
"MIT"
] | null | null | null | antipetros_discordbot/engine/replacements/command_replacements/__init__.py | official-antistasi-community/Antipetros_Discord_Bot | 1b5c8b61c09e61cdff671e259f0478d343a50c8d | [
"MIT"
] | 1 | 2021-02-12T01:10:51.000Z | 2021-02-12T01:10:51.000Z | from .base_command import AntiPetrosBaseCommand
from .flag_command import AntiPetrosFlagCommand
from .creation_decorators import auto_meta_info_command, auto_meta_info_group
from .base_group import AntiPetrosBaseGroup
from .command_category import CommandCategory
| 44 | 77 | 0.897727 | from .base_command import AntiPetrosBaseCommand
from .flag_command import AntiPetrosFlagCommand
from .creation_decorators import auto_meta_info_command, auto_meta_info_group
from .base_group import AntiPetrosBaseGroup
from .command_category import CommandCategory
| true | true |
f71ac7200feac49fd738de102b33055f7d33fc8f | 1,793 | py | Python | setup.py | endreszabo/py-radix | 2efbefb87d278be5c33166ca108e3cdcd28637b9 | [
"BSD-4-Clause-UC"
] | null | null | null | setup.py | endreszabo/py-radix | 2efbefb87d278be5c33166ca108e3cdcd28637b9 | [
"BSD-4-Clause-UC"
] | null | null | null | setup.py | endreszabo/py-radix | 2efbefb87d278be5c33166ca108e3cdcd28637b9 | [
"BSD-4-Clause-UC"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2004 Damien Miller <djm@mindrot.org>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# $Id$
import platform
import sys
from distutils.core import setup, Extension
VERSION = "0.5"
if __name__ == '__main__':
    libs = []
    src = ['radix.c', 'radix_python.c']
    if sys.platform == 'win32':
        libs += ['ws2_32']
        src += ['strlcpy.c']
        # BUGFIX: platform.version() was compared as a *string*;
        # "10.0.19041" < "6.0" is lexicographically True, so Windows 10 was
        # wrongly classified as pre-Vista. Compare the major version
        # numerically instead; inet_ntop() is only missing before Vista.
        try:
            major_version = int(platform.version().split('.')[0])
        except ValueError:
            major_version = 0
        if major_version < 6:  # older than Vista
            src += ['inet_ntop.c']
    radix = Extension('radix', libraries=libs, sources=src)
    setup(name="radix",
          version=VERSION,
          author="Damien Miller",
          author_email="djm@mindrot.org",
          url="http://www.mindrot.org/py-radix.html",
          description="Radix tree implementation",
          long_description="""\
py-radix is an implementation of a radix tree data structure for the storage
and retrieval of IPv4 and IPv6 network prefixes.
The radix tree is the data structure most commonly used for routing table
lookups. It efficiently stores network prefixes of varying lengths and
allows fast lookups of containing networks.
""",
          license="BSD",
          ext_modules=[radix])
| 35.156863 | 77 | 0.727273 |
import platform
import sys
from distutils.core import setup, Extension
VERSION = "0.5"
if __name__ == '__main__':
    libs = []
    src = ['radix.c', 'radix_python.c']
    if sys.platform == 'win32':
        libs += ['ws2_32']
        src += ['strlcpy.c']
        # BUGFIX: the original lexicographic string compare
        # (platform.version() < '6.0') treated "10.0.x" as pre-Vista.
        # Compare the major version numerically; inet_ntop() is only
        # missing before Vista.
        try:
            major_version = int(platform.version().split('.')[0])
        except ValueError:
            major_version = 0
        if major_version < 6:
            src += ['inet_ntop.c']
    radix = Extension('radix', libraries=libs, sources=src)
    setup(name="radix",
          version=VERSION,
          author="Damien Miller",
          author_email="djm@mindrot.org",
          url="http://www.mindrot.org/py-radix.html",
          description="Radix tree implementation",
          long_description="""\
py-radix is an implementation of a radix tree data structure for the storage
and retrieval of IPv4 and IPv6 network prefixes.
The radix tree is the data structure most commonly used for routing table
lookups. It efficiently stores network prefixes of varying lengths and
allows fast lookups of containing networks.
""",
          license="BSD",
          ext_modules=[radix])
| true | true |
f71ac8c34ec504c775b0e08c86a5e168fd54c6a6 | 842 | py | Python | code/preprocessing/download_wordvecs.py | theblind/squad_challenge | 3cc81be6ca73e7160abffcc47dde6e188cd02fbb | [
"Apache-2.0"
] | null | null | null | code/preprocessing/download_wordvecs.py | theblind/squad_challenge | 3cc81be6ca73e7160abffcc47dde6e188cd02fbb | [
"Apache-2.0"
] | null | null | null | code/preprocessing/download_wordvecs.py | theblind/squad_challenge | 3cc81be6ca73e7160abffcc47dde6e188cd02fbb | [
"Apache-2.0"
] | null | null | null | import zipfile
import argparse
import os
from squad_preprocess import maybe_download
def setup_args():
    """Parse the command line; --download_dir names the directory that will
    receive the downloaded GloVe files."""
    arg_parser = argparse.ArgumentParser()
    # Destination directory for the GloVe archive (mandatory).
    arg_parser.add_argument("--download_dir", required=True)
    return arg_parser.parse_args()
def main():
    """Download the GloVe 6B word-vector archive into --download_dir and
    unzip it there."""
    args = setup_args()
    glove_base_url = "http://nlp.stanford.edu/data/"
    glove_filename = "glove.6B.zip"
    print("\nDownloading wordvecs to {}".format(args.download_dir))
    if not os.path.exists(args.download_dir):
        os.makedirs(args.download_dir)
    # 862182613 is the expected byte size of glove.6B.zip; maybe_download
    # uses it to validate a (possibly cached) archive.
    maybe_download(glove_base_url, glove_filename, args.download_dir, 862182613)
    glove_zip_ref = zipfile.ZipFile(os.path.join(args.download_dir, glove_filename), 'r')
    glove_zip_ref.extractall(args.download_dir)
    glove_zip_ref.close()
if __name__ == '__main__':
    main()
| 27.16129 | 98 | 0.731591 | import zipfile
import argparse
import os
from squad_preprocess import maybe_download
def setup_args():
    """Parse CLI arguments; --download_dir is where the GloVe files go."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--download_dir", required=True)
    return parser.parse_args()
def main():
    """Download the GloVe 6B word-vector archive into --download_dir and
    unzip it there."""
    args = setup_args()
    glove_base_url = "http://nlp.stanford.edu/data/"
    glove_filename = "glove.6B.zip"
    print("\nDownloading wordvecs to {}".format(args.download_dir))
    if not os.path.exists(args.download_dir):
        os.makedirs(args.download_dir)
    # 862182613 is the expected byte size of glove.6B.zip, used by
    # maybe_download to validate a cached archive.
    maybe_download(glove_base_url, glove_filename, args.download_dir, 862182613)
    glove_zip_ref = zipfile.ZipFile(os.path.join(args.download_dir, glove_filename), 'r')
    glove_zip_ref.extractall(args.download_dir)
    glove_zip_ref.close()
if __name__ == '__main__':
    main()
| true | true |
f71ac992ef0211e206b3d27bddfec1270d1c095f | 6,545 | py | Python | data_clean/preprocessing.py | shuishoudage/music_generator | 7c17ef5bb3a5d872bff5ac8e1664f57f5b4ea08f | [
"MIT"
] | null | null | null | data_clean/preprocessing.py | shuishoudage/music_generator | 7c17ef5bb3a5d872bff5ac8e1664f57f5b4ea08f | [
"MIT"
] | null | null | null | data_clean/preprocessing.py | shuishoudage/music_generator | 7c17ef5bb3a5d872bff5ac8e1664f57f5b4ea08f | [
"MIT"
] | 1 | 2019-10-14T11:48:23.000Z | 2019-10-14T11:48:23.000Z | from typing import List, Tuple, Dict, Any
from collections import Counter
import pretty_midi
import matplotlib.pyplot as plt
import librosa.display
import os
from os import listdir, walk
from os.path import isfile, isdir, join
from sys import argv
import traceback
import logging
import numpy as np
from shutil import copyfile
import shutil
# Ideas behind the preprocessing class
#
# 1. only use those midi with one tempo and one key, since some midi music
# have key and tempo changes inside. Which might make some unpredictable result
#
# 2. list distribution for all keys contained in the corpus. Only select those
# most frequent appeared. (different keys may increase training difficulty)
#
# 3. only select similar tempo music, based on the mean and std of tempos,
# simple one will be left boundary = mean - std, right boundary = mean + std
#
# 4. find the mean of highest and lowest pitch in the corpus. filter out those not
# the range. We have pitch range from 0-128, no meaning cover two extreme sides.
class FileReport(object):
    """Aggregated corpus metadata (tempos, key frequencies, pitch extremes)
    used to build the filtering report."""

    def __init__(self,
                 tempos: List[float],
                 freq_key: Dict[int, int],
                 min_pitch: List[int],
                 max_pitch: List[int]):
        self.tempos = tempos
        self.freq_key = freq_key
        self.min_pitch = min_pitch
        self.max_pitch = max_pitch

    def aggregation_report(self):
        """Return (tempo mean, tempo std, most frequent key, mean low pitch,
        mean high pitch). The two pitch bounds matter most downstream: they
        are used to decode from pitch back to audio."""
        tempo_arr = np.array(self.tempos)
        dominant_key = self.getMostFreqValue(self.freq_key)
        low = int(np.array(self.min_pitch).mean())
        high = int(np.array(self.max_pitch).mean())
        return tempo_arr.mean(), tempo_arr.std(), dominant_key, low, high

    def plots(self):
        # Plot generation is not implemented yet.
        pass

    def getMostFreqValue(self, keys: Dict[int, int], reversed=True) -> int:
        """Return the key with the highest count (lowest if reversed=False)."""
        ordered = sorted(keys.items(), key=lambda kv: kv[1], reverse=reversed)
        return ordered[0][0]
class Preprocess(object):
    """Filter a MIDI corpus down to a musically consistent subset.

    Pipeline:
      1. keep only files with exactly one key and one time signature,
      2. aggregate tempo/key/pitch statistics over those files,
      3. keep files whose tempo is within one std of the corpus mean, whose
         pitch range lies inside the corpus' average range, and whose key is
         the corpus' most frequent key; matches are moved to ./filterData.
    """

    def __init__(self, path: str):
        self.path = path
        self.fileFilter()

    def generateMidiFileReport(self) -> FileReport:
        """Build a FileReport with tempos, key frequencies and pitch extremes
        of the pre-filtered files."""
        tempos = []
        keys = []
        max_pitchs = []
        min_pitchs = []
        for pm in self.pms:
            try:
                tempos.append(pm.estimate_tempo())
                keys.append(pm.key_signature_changes[0].key_number)
                min_pitch, max_pitch = self.getMinMaxPitch(pm)
                max_pitchs.append(max_pitch)
                min_pitchs.append(min_pitch)
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; unparsable files are simply skipped.
            except Exception:
                pass
        self.report = FileReport(tempos, dict(Counter(keys)),
                                 min_pitchs, max_pitchs)
        return self.report

    def getMinMaxPitch(self, pm: pretty_midi.PrettyMIDI) -> Tuple[int, int]:
        """Return (lowest, highest) pitch used anywhere in `pm`.

        Raises ValueError if the file contains no notes at all.
        """
        notes = [note.pitch
                 for instrument in pm.instruments
                 for note in instrument.notes]
        return min(notes), max(notes)

    def SaveFilterMIDIfiles(self):
        """Move every file matching the aggregate criteria into ./filterData."""
        report = self.generateMidiFileReport()
        (temp_mean, temp_std, key,
         left_boundary, right_boundary) = report.aggregation_report()
        saved_path = os.path.join(os.getcwd(), 'filterData')
        for pm, path in zip(self.pms, self.paths):
            try:
                tempo = pm.estimate_tempo()
                min_pitch, max_pitch = self.getMinMaxPitch(pm)
                if self.isTempoInRange(tempo, temp_mean, temp_std) \
                        and self.isPitchInRange(min_pitch, max_pitch,
                                                left_boundary, right_boundary) \
                        and self.isKeyMatch(
                            pm.key_signature_changes[0].key_number, key):
                    # exist_ok makes the pre-check redundant.
                    os.makedirs(saved_path, exist_ok=True)
                    shutil.move(path, os.path.join(saved_path,
                                                   os.path.basename(path)))
            except Exception:
                # Best-effort: skip files that fail tempo/pitch analysis.
                pass

    def isTempoInRange(self, tempo: float, mean: float, std: float) -> bool:
        """True if `tempo` lies strictly within one std of `mean`."""
        return (mean - std) < tempo < (mean + std)

    def isKeyMatch(self, key: int, grand_truth_key: int) -> bool:
        """True if `key` equals the reference (most frequent) key."""
        return key == grand_truth_key

    def isPitchInRange(self, low_pitch: int,
                       high_pitch: int,
                       left_boundary: int,
                       right_boundary: int) -> bool:
        """True if [low_pitch, high_pitch] fits inside the boundaries."""
        return low_pitch >= left_boundary and high_pitch <= right_boundary

    def fileFilter(self):
        """First pass: collect files containing exactly one key signature and
        one time signature."""
        self.pms: List[pretty_midi.PrettyMIDI] = []
        self.paths: List[str] = []
        for (dirPath, _, files) in walk(self.path):  # type: ignore
            for file in files:
                # absolute path of the file
                path = join(dirPath, file)
                try:
                    pm = pretty_midi.PrettyMIDI(path)
                    # only handle files containing one key and one tempo
                    if len(pm.key_signature_changes) == 1 \
                            and len(pm.time_signature_changes) == 1:
                        self.pms.append(pm)
                        self.paths.append(path)
                except Exception:
                    # Skip files pretty_midi cannot parse.
                    pass
def cliArgParser(argv) -> Any:
    """Validate the CLI arguments and return the folder's absolute path.

    Expects exactly one positional argument naming an existing directory;
    raises ValueError otherwise.
    """
    if len(argv) != 2:
        raise ValueError(f"path of folder must be provided")
    candidate = argv[1]
    if not isdir(candidate):
        raise ValueError(f"provided path is not a folder")
    return os.path.abspath(candidate)
if __name__ == "__main__":
try:
path = cliArgParser(argv)
p = Preprocess(path)
p.SaveFilterMIDIfiles()
except Exception as err:
print(traceback.format_exc())
exit(1)
| 35.570652 | 98 | 0.59343 | from typing import List, Tuple, Dict, Any
from collections import Counter
import pretty_midi
import matplotlib.pyplot as plt
import librosa.display
import os
from os import listdir, walk
from os.path import isfile, isdir, join
from sys import argv
import traceback
import logging
import numpy as np
from shutil import copyfile
import shutil
class FileReport(object):
    """Aggregated corpus metadata (tempos, key frequencies, pitch extremes)
    used to drive MIDI filtering."""
    def __init__(self,
                 tempos: List[float],
                 freq_key: Dict[int, int],
                 min_pitch: List[int],
                 max_pitch: List[int]):
        self.tempos = tempos
        self.freq_key = freq_key
        self.min_pitch = min_pitch
        self.max_pitch = max_pitch
    def aggregation_report(self):
        """Return (tempo mean, tempo std, most frequent key, mean min pitch,
        mean max pitch); the pitch bounds are later used to decode pitches
        back to audio."""
        temp_mean = np.array(self.tempos).mean()
        temp_std = np.array(self.tempos).std()
        most_freq_key = self.getMostFreqValue(self.freq_key)
        min_pitch = int(np.array(self.min_pitch).mean())
        max_pitch = int(np.array(self.max_pitch).mean())
        return temp_mean, temp_std, most_freq_key, min_pitch, max_pitch
    def plots(self):
        # Plot generation is not implemented yet.
        pass
    def getMostFreqValue(self, keys: Dict[int, int], reversed=True) -> int:
        """Return the key with the highest count (lowest when reversed=False)."""
        return sorted(keys.items(), key=lambda kv: kv[1], reverse=reversed)[0][0]
class Preprocess(object):
    """Filter a MIDI corpus: keep only files with a single key and time
    signature, then select those matching the corpus-wide tempo/key/pitch
    statistics and move them into ./filterData."""
    def __init__(self, path: str):
        self.path = path
        self.fileFilter()
    def generateMidiFileReport(self) -> FileReport:
        """Aggregate tempo, key and pitch statistics into a FileReport."""
        tempos = []
        keys = []
        max_pitchs = []
        min_pitchs = []
        for pm in self.pms:
            try:
                tempos.append(pm.estimate_tempo())
                key = pm.key_signature_changes[0].key_number
                keys.append(key)
                min_pitch, max_pitch = self.getMinMaxPitch(pm)
                max_pitchs.append(max_pitch)
                min_pitchs.append(min_pitch)
            except:
                # Files whose metadata cannot be analysed are skipped.
                pass
        self.report = FileReport(tempos, dict(
            Counter(keys)), min_pitchs, max_pitchs)
        return self.report
    def getMinMaxPitch(self, pm: pretty_midi.PrettyMIDI):
        """Return the lowest and highest pitch found in `pm`."""
        notes = [
            note.pitch for instrument in pm.instruments for note in instrument.notes
        ]
        return min(notes), max(notes)
    def SaveFilterMIDIfiles(self):
        """Move files matching the aggregate criteria to ./filterData."""
        report = self.generateMidiFileReport()
        temp_mean, temp_std, key, left_boundary, right_boundary = report.aggregation_report()
        piano_roll_paths = []
        for pm, path in zip(self.pms, self.paths):
            try:
                tempo = pm.estimate_tempo()
                min_pitch, max_pitch = self.getMinMaxPitch(pm)
                if self.isTempoInRange(tempo, temp_mean, temp_std) \
                        and self.isPitchInRange(min_pitch, max_pitch, left_boundary, right_boundary) \
                        and self.isKeyMatch(pm.key_signature_changes[0].key_number, key):
                    savedPath = os.path.join(os.getcwd(), 'filterData')
                    if not os.path.exists(savedPath):
                        os.makedirs(savedPath, exist_ok=True)
                    shutil.move(
                        path, os.path.join(os.getcwd(), 'filterData', os.path.basename(path)))
            except:
                # Best-effort: skip files that fail analysis.
                pass
    def isTempoInRange(self, tempo: float, mean: float, std: float) -> bool:
        """True if `tempo` lies strictly within one std of `mean`."""
        if tempo > (mean - std) and tempo < (mean + std):
            return True
        return False
    def isKeyMatch(self, key: int, grand_truth_key: int) -> bool:
        """True if `key` equals the reference key."""
        if key == grand_truth_key:
            return True
        return False
    def isPitchInRange(self, low_pitch: int,
                       high_pitch: int,
                       left_boundary: int,
                       right_boundary: int) -> bool:
        """True if [low_pitch, high_pitch] lies within the boundaries."""
        if low_pitch >= left_boundary and high_pitch <= right_boundary:
            return True
        return False
    def fileFilter(self):
        """Collect files containing exactly one key and one time signature."""
        self.pms: List[pretty_midi.PrettyMIDI] = []
        self.paths: List[str] = []
        for (dirPath, _, files) in walk(self.path):
            for file in files:
                path = join(dirPath, file)
                try:
                    pm = pretty_midi.PrettyMIDI(path)
                    if len(pm.key_signature_changes) == 1 \
                            and len(pm.time_signature_changes) == 1:
                        self.pms.append(pm)
                        self.paths.append(path)
                except:
                    # Skip files pretty_midi cannot parse.
                    pass
def cliArgParser(argv) -> Any:
    """Validate CLI args: expects exactly one argument naming an existing
    folder; returns its absolute path or raises ValueError."""
    if len(argv) != 2:
        raise ValueError(f"path of folder must be provided")
    if isdir(argv[1]):
        path = os.path.abspath(argv[1])
        return path
    else:
        raise ValueError(f"provided path is not a folder")
if __name__ == "__main__":
try:
path = cliArgParser(argv)
p = Preprocess(path)
p.SaveFilterMIDIfiles()
except Exception as err:
print(traceback.format_exc())
exit(1)
| true | true |
f71aca40733f04d9dbf52d3494976b80319f27ac | 1,059 | py | Python | installer/core/providers/aws/boto3/cloudwatch_event.py | dabest1/pacbot | 83189006905f7d43f48d416166490773edd89cb1 | [
"Apache-2.0"
] | null | null | null | installer/core/providers/aws/boto3/cloudwatch_event.py | dabest1/pacbot | 83189006905f7d43f48d416166490773edd89cb1 | [
"Apache-2.0"
] | null | null | null | installer/core/providers/aws/boto3/cloudwatch_event.py | dabest1/pacbot | 83189006905f7d43f48d416166490773edd89cb1 | [
"Apache-2.0"
] | 1 | 2019-06-11T11:14:05.000Z | 2019-06-11T11:14:05.000Z | import boto3
def get_event_client(access_key, secret_key, region):
    """
    Returns the client object for AWS Events

    Args:
        access_key (str): AWS Access Key
        secret_key (str): AWS Secret Key
        region (str): AWS Region

    Returns:
        obj: AWS Cloudwatch Event Client Obj
    """
    # "events" is the boto3 service name for CloudWatch Events / EventBridge.
    return boto3.client(
        "events",
        region_name=region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)
def check_rule_exists(rule_name, access_key, secret_key, region):
    """
    Check whether the given cloudwatch rule already exists in AWS account

    Args:
        rule_name (str): Cloudwatch rule name
        access_key (str): AWS Access Key
        secret_key (str): AWS Secret Key
        region (str): AWS Region

    Returns:
        Boolean: True if rule exists else False
    """
    client = get_event_client(access_key, secret_key, region)
    try:
        response = client.describe_rule(Name=rule_name)
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
    # and SystemExit); boto3 raises a botocore ClientError
    # (ResourceNotFoundException) when the rule is absent.
    except Exception:
        return False
    return bool(response)
| 25.214286 | 72 | 0.648725 | import boto3
def get_event_client(access_key, secret_key, region):
    """Create a boto3 client for AWS CloudWatch Events in the given region
    using the supplied credentials."""
    # "events" is the boto3 service name for CloudWatch Events / EventBridge.
    return boto3.client(
        "events",
        region_name=region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)
def check_rule_exists(rule_name, access_key, secret_key, region):
    """Return True if the named CloudWatch Events rule exists, else False."""
    client = get_event_client(access_key, secret_key, region)
    try:
        response = client.describe_rule(Name=rule_name)
    # Narrowed from a bare `except:`; boto3 raises a botocore ClientError
    # (ResourceNotFoundException) when the rule is absent.
    except Exception:
        return False
    return bool(response)
| true | true |
f71aca5cb50d6e0d40cf7342ca3cded4cb68b824 | 1,870 | py | Python | finorch/sessions/cit/session.py | ADACS-Australia/SS2021B-DBrown | 67b93b316e6f9ab09e3bd5105edbbc71108e0723 | [
"MIT"
] | null | null | null | finorch/sessions/cit/session.py | ADACS-Australia/SS2021B-DBrown | 67b93b316e6f9ab09e3bd5105edbbc71108e0723 | [
"MIT"
] | null | null | null | finorch/sessions/cit/session.py | ADACS-Australia/SS2021B-DBrown | 67b93b316e6f9ab09e3bd5105edbbc71108e0723 | [
"MIT"
] | null | null | null | import logging
from finorch.config.config import api_config_manager
from finorch.sessions.cit.client import CITClient
from finorch.sessions.abstract_session import AbstractSession
from finorch.sessions.cit.wrapper import CITWrapper
from finorch.transport.ssh import SshTransport
class CITSession(AbstractSession):
    """Session for running finesse jobs in parallel on the CIT (Caltech LIGO)
    cluster over an SSH transport."""
    # Identifier used to select this session type and name its config section.
    callsign = "cit"
    client_klass = CITClient
    wrapper_klass = CITWrapper
    transport_klass = SshTransport
    def __init__(self, exec_path, username, python_path, env_file=None, *args, **kwargs):
        """
        Creates a new cit session that can be used to run finesse jobs in parallel on cit.

        :param exec_path: The path to where jobs should be executed (and results stored), if not specified the path
        will be a temporary directory that is cleaned up when the client is terminated.
        :param username: SSH username used to log in to the CIT head node.
        :param python_path: Path to the python interpreter on the remote host.
        :param env_file: Optional environment file used on the remote host.
        """
        super().__init__()
        # Build the SSH transport to the CIT head node; extra args are passed
        # through to the transport constructor.
        self._transport = CITSession.transport_klass(
            self,
            exec_path,
            username=username,
            python_path=python_path,
            env_file=env_file,
            host="ldas-grid.ligo.caltech.edu",
            callsign=self.callsign,
            *args,
            **kwargs
        )
        # If a previous session recorded the remote client's port in the api
        # config, try to reconnect to that client instead of starting fresh.
        cit_config = api_config_manager.get_section('cit')
        remote_port = cit_config.get('remote_port', None) if cit_config else None
        if remote_port:
            logging.info("Attempting to reconnect remote client last seen on remote port " + str(remote_port))
        else:
            logging.info("Attempting to connect remote client")
        remote_port = self._transport.connect(
            remote_port=remote_port
        )
        logging.info("Remote client connected on port " + str(remote_port))
        # Persist the port so the next session can attempt a reconnect.
        api_config_manager.set('cit', 'remote_port', str(remote_port))
    @property
    def transport(self):
        # The underlying SSH transport for this session.
        return self._transport
| 32.807018 | 115 | 0.667914 | import logging
from finorch.config.config import api_config_manager
from finorch.sessions.cit.client import CITClient
from finorch.sessions.abstract_session import AbstractSession
from finorch.sessions.cit.wrapper import CITWrapper
from finorch.transport.ssh import SshTransport
class CITSession(AbstractSession):
    """Session for running finesse jobs in parallel on the CIT (Caltech LIGO)
    cluster over an SSH transport."""
    # Identifier used to select this session type and name its config section.
    callsign = "cit"
    client_klass = CITClient
    wrapper_klass = CITWrapper
    transport_klass = SshTransport
    def __init__(self, exec_path, username, python_path, env_file=None, *args, **kwargs):
        """Create a new CIT session.

        :param exec_path: Where jobs are executed and results stored.
        :param username: SSH username used to log in to the CIT head node.
        :param python_path: Path to the python interpreter on the remote host.
        :param env_file: Optional environment file used on the remote host.
        """
        super().__init__()
        # Build the SSH transport to the CIT head node; extra args are passed
        # through to the transport constructor.
        self._transport = CITSession.transport_klass(
            self,
            exec_path,
            username=username,
            python_path=python_path,
            env_file=env_file,
            host="ldas-grid.ligo.caltech.edu",
            callsign=self.callsign,
            *args,
            **kwargs
        )
        # If a previous session recorded the remote client's port in the api
        # config, try to reconnect to that client instead of starting fresh.
        cit_config = api_config_manager.get_section('cit')
        remote_port = cit_config.get('remote_port', None) if cit_config else None
        if remote_port:
            logging.info("Attempting to reconnect remote client last seen on remote port " + str(remote_port))
        else:
            logging.info("Attempting to connect remote client")
        remote_port = self._transport.connect(
            remote_port=remote_port
        )
        logging.info("Remote client connected on port " + str(remote_port))
        # Persist the port so the next session can attempt a reconnect.
        api_config_manager.set('cit', 'remote_port', str(remote_port))
    @property
    def transport(self):
        # The underlying SSH transport for this session.
        return self._transport
| true | true |
f71acbda8152b39dcd69a9518aee969805ce1605 | 4,092 | py | Python | plotly/validators/scattergeo/marker/_line.py | fcollonval/plotly.py | 5c7f100db1af8c82bb740a38ef684955a8ed6d0e | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/scattergeo/marker/_line.py | fcollonval/plotly.py | 5c7f100db1af8c82bb740a38ef684955a8ed6d0e | [
"MIT"
] | null | null | null | plotly/validators/scattergeo/marker/_line.py | fcollonval/plotly.py | 5c7f100db1af8c82bb740a38ef684955a8ed6d0e | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
# NOTE: auto-generated by plotly's validator code generator; prefer editing
# the generator rather than this file by hand.
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the `scattergeo.marker.line` property."""
    def __init__(
        self, plotly_name='line', parent_name='scattergeo.marker', **kwargs
    ):
        # data_docs is user-facing help text rendered by plotly; kept verbatim.
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str='Line',
            data_docs="""
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.line.colorscale`. Has an
                effect only if in `marker.line.color`is set to
                a numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.line.color`) or the bounds set in
                `marker.line.cmin` and `marker.line.cmax` Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Defaults to `false` when
                `marker.line.cmin` and `marker.line.cmax` are
                set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Value should have the
                same units as in `marker.line.color` and if
                set, `marker.line.cmin` must be set as well.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Value should have the
                same units as in `marker.line.color` and if
                set, `marker.line.cmax` must be set as well.
            color
                Sets themarker.linecolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.line.cmin` and `marker.line.cmax` if
                set.
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.line.color`is set to a numerical array.
                The colorscale must be an array containing
                arrays mapping a normalized value to an rgb,
                rgba, hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`. To
                control the bounds of the colorscale in color
                space, use`marker.line.cmin` and
                `marker.line.cmax`. Alternatively, `colorscale`
                may be a palette name string of the following
                list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
                eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
                body,Earth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on plot.ly for color
                .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.line.color`is set to
                a numerical array. If true, `marker.line.cmin`
                will correspond to the last color in the array
                and `marker.line.cmax` will correspond to the
                first color.
            width
                Sets the width (in px) of the lines bounding
                the marker points.
            widthsrc
                Sets the source reference on plot.ly for width
                .
""",
            **kwargs
        )
| 47.034483 | 75 | 0.554497 | import _plotly_utils.basevalidators
# NOTE: auto-generated by plotly's validator code generator; prefer editing
# the generator rather than this file by hand.
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the `scattergeo.marker.line` property."""
    def __init__(
        self, plotly_name='line', parent_name='scattergeo.marker', **kwargs
    ):
        # data_docs is user-facing help text rendered by plotly; kept verbatim.
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str='Line',
            data_docs="""
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.line.colorscale`. Has an
                effect only if in `marker.line.color`is set to
                a numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.line.color`) or the bounds set in
                `marker.line.cmin` and `marker.line.cmax` Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Defaults to `false` when
                `marker.line.cmin` and `marker.line.cmax` are
                set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Value should have the
                same units as in `marker.line.color` and if
                set, `marker.line.cmin` must be set as well.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Value should have the
                same units as in `marker.line.color` and if
                set, `marker.line.cmax` must be set as well.
            color
                Sets themarker.linecolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.line.cmin` and `marker.line.cmax` if
                set.
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.line.color`is set to a numerical array.
                The colorscale must be an array containing
                arrays mapping a normalized value to an rgb,
                rgba, hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`. To
                control the bounds of the colorscale in color
                space, use`marker.line.cmin` and
                `marker.line.cmax`. Alternatively, `colorscale`
                may be a palette name string of the following
                list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
                eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
                body,Earth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on plot.ly for color
                .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.line.color`is set to
                a numerical array. If true, `marker.line.cmin`
                will correspond to the last color in the array
                and `marker.line.cmax` will correspond to the
                first color.
            width
                Sets the width (in px) of the lines bounding
                the marker points.
            widthsrc
                Sets the source reference on plot.ly for width
                .
""",
            **kwargs
        )
| true | true |
f71acbff3ef602966bb7796ad13e0aeba23cd1e4 | 203,023 | py | Python | gmusicapi/protocol/locker_pb2.py | siebert/Unofficial-Google-Music-API | 8222d566f5048c03f14beee031632fa80e3c0794 | [
"BSD-3-Clause"
] | 2 | 2016-09-06T07:32:06.000Z | 2019-11-20T22:22:39.000Z | gmusicapi/protocol/locker_pb2.py | siebert/Unofficial-Google-Music-API | 8222d566f5048c03f14beee031632fa80e3c0794 | [
"BSD-3-Clause"
] | null | null | null | gmusicapi/protocol/locker_pb2.py | siebert/Unofficial-Google-Music-API | 8222d566f5048c03f14beee031632fa80e3c0794 | [
"BSD-3-Clause"
] | 1 | 2019-11-20T22:22:41.000Z | 2019-11-20T22:22:41.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import uits_pb2
DESCRIPTOR = descriptor.FileDescriptor(
name='locker.proto',
package='',
serialized_pb='\n\x0clocker.proto\x1a\nuits.proto\"\xf8\x01\n\x08\x41udioRef\x12\x1e\n\x05store\x18\x01 \x02(\x0e\x32\x0f.AudioRef.Store\x12\x0b\n\x03ref\x18\x02 \x02(\x0c\x12\x0b\n\x03url\x18\x04 \x01(\t\x12\x10\n\x08\x62it_rate\x18\x05 \x01(\x05\x12\x13\n\x0bsample_rate\x18\x06 \x01(\x05\x12\x14\n\x0c\x64ownloadable\x18\x07 \x01(\x08\x12\x17\n\x0f\x64uration_millis\x18\x08 \x01(\x03\x12\x19\n\x11rematch_timestamp\x18\t \x01(\x03\x12\x1e\n\x16invalid_due_to_wipeout\x18\n \x01(\x08\"!\n\x05Store\x12\r\n\tBLOBSTORE\x10\x01\x12\t\n\x05SM_V2\x10\x02\"\xd1\x01\n\x08ImageRef\x12\x1e\n\x05store\x18\x01 \x01(\x0e\x32\x0f.ImageRef.Store\x12\r\n\x05width\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x0b\n\x03url\x18\x06 \x01(\t\x12\x1e\n\x16invalid_due_to_wipeout\x18\x07 \x01(\x08\x12 \n\x06origin\x18\x08 \x01(\x0e\x32\x10.ImageRef.Origin\"\x14\n\x05Store\x12\x0b\n\x07SHOEBOX\x10\x03\"!\n\x06Origin\x12\x0c\n\x08PERSONAL\x10\x01\x12\t\n\x05STORE\x10\x02\"1\n\x12UploadedUitsId3Tag\x12\r\n\x05owner\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x8c\x10\n\x05Track\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tclient_id\x18\x02 \x01(\t\x12\x1a\n\x12\x63reation_timestamp\x18\x03 \x01(\x03\x12\x1f\n\x17last_modified_timestamp\x18\x04 \x01(\x03\x12\x16\n\x07\x64\x65leted\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\r\n\x05title\x18\x06 \x01(\t\x12\x0e\n\x06\x61rtist\x18\x07 \x01(\t\x12\x13\n\x0b\x61rtist_hash\x18. 
\x01(\x03\x12\x10\n\x08\x63omposer\x18\x08 \x01(\t\x12\r\n\x05\x61lbum\x18\t \x01(\t\x12\x14\n\x0c\x61lbum_artist\x18\n \x01(\t\x12\x17\n\x0f\x63\x61nonical_album\x18\x38 \x01(\t\x12\x18\n\x10\x63\x61nonical_artist\x18\x39 \x01(\t\x12\x1d\n\x15\x63\x61nonical_genre_album\x18: \x01(\t\x12\x0c\n\x04year\x18\x0b \x01(\x05\x12\x0f\n\x07\x63omment\x18\x0c \x01(\t\x12\x14\n\x0ctrack_number\x18\r \x01(\x05\x12\r\n\x05genre\x18\x0e \x01(\t\x12\x17\n\x0f\x64uration_millis\x18\x0f \x01(\x03\x12\x18\n\x10\x62\x65\x61ts_per_minute\x18\x10 \x01(\x05\x12\x19\n\x11original_bit_rate\x18, \x01(\x05\x12\x1c\n\taudio_ref\x18\x11 \x03(\x0b\x32\t.AudioRef\x12 \n\ralbum_art_ref\x18\x12 \x03(\x0b\x32\t.ImageRef\x12\x36\n\x13\x61vailability_status\x18\x13 \x01(\x0e\x32\x19.Track.AvailabilityStatus\x12\x12\n\nplay_count\x18\x14 \x01(\x05\x12(\n\x0c\x63ontent_type\x18\x19 \x01(\x0e\x32\x12.Track.ContentType\x12\x19\n\x11total_track_count\x18\x1a \x01(\x05\x12\x13\n\x0b\x64isc_number\x18\x1b \x01(\x05\x12\x18\n\x10total_disc_count\x18\x1c \x01(\x05\x12!\n\x08\x63hannels\x18\x1d \x01(\x0e\x32\x0f.Track.Channels\x12$\n\ntrack_type\x18\x1e \x01(\x0e\x32\x10.Track.TrackType\x12\x1e\n\x16use_single_server_copy\x18; \x01(\x08\x12\x1d\n\x06rating\x18\x1f \x01(\x0e\x32\r.Track.Rating\x12\x16\n\x0e\x65stimated_size\x18 \x01(\x03\x12\x10\n\x08store_id\x18! 
\x01(\t\x12\x12\n\nmetajam_id\x18\" \x01(\t\x12 \n\x15metajam_id_confidence\x18+ \x01(\x01:\x01\x30\x12\x0c\n\x04uits\x18# \x01(\t\x12$\n\ruits_metadata\x18( \x01(\x0b\x32\r.UitsMetadata\x12\x13\n\x0b\x63ompilation\x18$ \x01(\x08\x12\x19\n\x11\x63lient_date_added\x18% \x01(\x03\x12\x18\n\x10recent_timestamp\x18& \x01(\x03\x12\x1d\n\x0e\x64o_not_rematch\x18\' \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x13\x66rom_album_purchase\x18) \x01(\x08\x12\x18\n\x10\x61lbum_metajam_id\x18* \x01(\t\x12\x16\n\x0etransaction_id\x18- \x01(\t\x12\x13\n\x0b\x64\x65\x62ug_track\x18/ \x01(\x08\x12\x18\n\x10normalized_title\x18\x30 \x01(\t\x12\x19\n\x11normalized_artist\x18\x31 \x01(\t\x12\x18\n\x10normalized_album\x18\x32 \x01(\t\x12\x1f\n\x17normalized_album_artist\x18\x33 \x01(\t\x12\"\n\x1anormalized_canonical_album\x18\x36 \x01(\t\x12#\n\x1bnormalized_canonical_artist\x18\x37 \x01(\t\x12\x13\n\x0buploader_id\x18\x34 \x01(\t\x12\x17\n\x0f\x63lient_album_id\x18\x35 \x01(\t\x12\x18\n\x10label_owner_code\x18< \x01(\t\x12\x31\n\x15original_content_type\x18= \x01(\x0e\x32\x12.Track.ContentType\x12*\n\ruploaded_uits\x18G \x03(\x0b\x32\x13.UploadedUitsId3Tag\"\x86\x01\n\x12\x41vailabilityStatus\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07MATCHED\x10\x02\x12\x14\n\x10UPLOAD_REQUESTED\x10\x03\x12\r\n\tAVAILABLE\x10\x04\x12\x12\n\x0e\x46ORCE_REUPLOAD\x10\x05\x12\x1d\n\x19UPLOAD_PERMANENTLY_FAILED\x10\x06\"W\n\x0b\x43ontentType\x12\x07\n\x03MP3\x10\x01\x12\x07\n\x03M4A\x10\x02\x12\x07\n\x03\x41\x41\x43\x10\x03\x12\x08\n\x04\x46LAC\x10\x04\x12\x07\n\x03OGG\x10\x05\x12\x07\n\x03WMA\x10\x06\x12\x07\n\x03M4P\x10\x07\x12\x08\n\x04\x41LAC\x10\x08\" 
\n\x08\x43hannels\x12\x08\n\x04MONO\x10\x01\x12\n\n\x06STEREO\x10\x02\"\x8b\x01\n\tTrackType\x12\x11\n\rMATCHED_TRACK\x10\x01\x12\x13\n\x0fUNMATCHED_TRACK\x10\x02\x12\x0f\n\x0bLOCAL_TRACK\x10\x03\x12\x13\n\x0fPURCHASED_TRACK\x10\x04\x12\x1f\n\x1bMETADATA_ONLY_MATCHED_TRACK\x10\x05\x12\x0f\n\x0bPROMO_TRACK\x10\x06\"e\n\x06Rating\x12\r\n\tNOT_RATED\x10\x01\x12\x0c\n\x08ONE_STAR\x10\x02\x12\r\n\tTWO_STARS\x10\x03\x12\x0f\n\x0bTHREE_STARS\x10\x04\x12\x0e\n\nFOUR_STARS\x10\x05\x12\x0e\n\nFIVE_STARS\x10\x06\"\x1f\n\x06Tracks\x12\x15\n\x05track\x18\x01 \x03(\x0b\x32\x06.Track\"\xb4\x02\n\x08Playlist\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tclient_id\x18\x02 \x01(\t\x12\x1a\n\x12\x63reation_timestamp\x18\x03 \x01(\x03\x12\x1f\n\x17last_modified_timestamp\x18\x04 \x01(\x03\x12\x16\n\x07\x64\x65leted\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x0c\n\x04name\x18\x06 \x01(\t\x12-\n\rplaylist_type\x18\x07 \x01(\x0e\x32\x16.Playlist.PlaylistType\x12#\n\x10playlist_art_ref\x18\x08 \x01(\x0b\x32\t.ImageRef\x12\x18\n\x10recent_timestamp\x18\t \x01(\x03\"8\n\x0cPlaylistType\x12\x12\n\x0eUSER_GENERATED\x10\x01\x12\t\n\x05MAGIC\x10\x02\x12\t\n\x05PROMO\x10\x03\"\xae\x03\n\rPlaylistEntry\x12\x13\n\x0bplaylist_id\x18\x01 \x01(\t\x12\x19\n\x11\x61\x62solute_position\x18\x02 \x01(\x03\x12\x1c\n\x14place_after_entry_id\x18\x03 \x01(\t\x12\x10\n\x08track_id\x18\x04 \x01(\t\x12\n\n\x02id\x18\x05 \x01(\t\x12\x11\n\tclient_id\x18\x06 \x01(\t\x12\x1a\n\x12\x63reation_timestamp\x18\x07 \x01(\x03\x12\x1f\n\x17last_modified_timestamp\x18\x08 \x01(\x03\x12\x16\n\x07\x64\x65leted\x18\t \x01(\x08:\x05\x66\x61lse\x12H\n\x19relative_position_id_type\x18\n \x01(\x0e\x32%.PlaylistEntry.RelativePositionIdType\x12\x15\n\x05track\x18\x0f \x01(\x0b\x32\x06.Track\x12\x1d\n\x15place_before_entry_id\x18\x10 \x01(\t\x12\x17\n\x0fstring_position\x18\x11 \x01(\t\"0\n\x16RelativePositionIdType\x12\n\n\x06SERVER\x10\x01\x12\n\n\x06\x43LIENT\x10\x02\"\xd0\x03\n\x16TrackSearchRestriction\x12\x39\n\tattribute\x18\x01 
\x02(\x0e\x32&.TrackSearchRestriction.TrackAttribute\x12\r\n\x05value\x18\x02 \x02(\t\x12?\n\x0f\x63omparison_type\x18\x03 \x02(\x0e\x32&.TrackSearchRestriction.ComparisonType\"\xa6\x01\n\x0eTrackAttribute\x12\t\n\x05TITLE\x10\x01\x12\n\n\x06\x41RTIST\x10\x02\x12\t\n\x05\x41LBUM\x10\x03\x12\x10\n\x0c\x41LBUM_ARTIST\x10\x04\x12\t\n\x05GENRE\x10\x05\x12\x17\n\x13\x41VAILABILITY_STATUS\x10\x06\x12\x0e\n\nTRACK_TYPE\x10\x07\x12\x08\n\x04YEAR\x10\x08\x12\x0c\n\x08STORE_ID\x10\t\x12\x14\n\x10\x41LBUM_METAJAM_ID\x10\n\"\x81\x01\n\x0e\x43omparisonType\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x11\n\rGREATER_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x0e\n\nLESS_EQUAL\x10\x05\x12\x11\n\rPARTIAL_MATCH\x10\x06\"\xda\x01\n\x19TrackSearchRestrictionSet\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.TrackSearchRestrictionSet.RestrictionSetType\x12,\n\x0brestriction\x18\x02 \x03(\x0b\x32\x17.TrackSearchRestriction\x12+\n\x07sub_set\x18\x03 \x03(\x0b\x32\x1a.TrackSearchRestrictionSet\"%\n\x12RestrictionSetType\x12\x07\n\x03\x41ND\x10\x00\x12\x06\n\x02OR\x10\x01\"\x80\x02\n\x0eTrackSortOrder\x12\x31\n\tattribute\x18\x01 \x01(\x0e\x32\x1e.TrackSortOrder.TrackAttribute\x12\x18\n\ndescending\x18\x02 \x01(\x08:\x04true\"\xa0\x01\n\x0eTrackAttribute\x12\x16\n\x12LAST_MODIFIED_TIME\x10\x01\x12\n\n\x06\x41RTIST\x10\x02\x12\t\n\x05\x41LBUM\x10\x03\x12\t\n\x05TITLE\x10\x04\x12\x10\n\x0cTRACK_NUMBER\x10\x06\x12\x0e\n\nPLAY_COUNT\x10\t\x12\x13\n\x0f\x44URATION_MILLIS\x10\n\x12\n\n\x06RATING\x10\x0b\x12\x11\n\rCREATION_TIME\x10\x0c\"\xfe\x02\n\x10GetTracksRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12\x13\n\x0bupdated_min\x18\x02 \x01(\x03\x12\x17\n\x0finclude_deleted\x18\x03 \x01(\x08\x12\x13\n\x0bmax_results\x18\x04 \x01(\x05\x12\x1a\n\x12\x63ontinuation_token\x18\x05 \x01(\t\x12\x33\n\x12search_restriction\x18\x06 \x03(\x0b\x32\x17.TrackSearchRestriction\x12#\n\nsort_order\x18\x07 
\x03(\x0b\x32\x0f.TrackSortOrder\x12\x33\n\x0frestriction_set\x18\x08 \x01(\x0b\x32\x1a.TrackSearchRestrictionSet\x12;\n\x10track_projection\x18\t \x01(\x0e\x32!.GetTracksRequest.TrackProjection\".\n\x0fTrackProjection\x12\x08\n\x04\x46ULL\x10\x01\x12\x11\n\rFRONTEND_VIEW\x10\x02\"\xd3\x01\n\x11GetTracksResponse\x12\x36\n\rresponse_code\x18\x01 \x02(\x0e\x32\x1f.GetTracksResponse.ResponseCode\x12\x15\n\x05track\x18\x02 \x03(\x0b\x32\x06.Track\x12\x1f\n\x17\x65stimated_total_results\x18\x03 \x01(\x03\x12\x1a\n\x12\x63ontinuation_token\x18\x04 \x01(\t\"2\n\x0cResponseCode\x12\x06\n\x02OK\x10\x01\x12\x10\n\x0cNOT_MODIFIED\x10\x02\x12\x08\n\x04GONE\x10\x03\"\xfc\x01\n\x19GetPlaylistEntriesRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12\x13\n\x0bupdated_min\x18\x02 \x01(\x03\x12\x17\n\x0finclude_deleted\x18\x03 \x01(\x08\x12\x13\n\x0bmax_results\x18\x04 \x01(\x05\x12\x1a\n\x12\x63ontinuation_token\x18\x05 \x01(\t\x12\x1a\n\x12playlist_id_filter\x18\x06 \x01(\t\x12)\n\x1ainclude_all_track_metadata\x18\x07 \x01(\x08:\x05\x66\x61lse\x12(\n\x1aonly_show_available_tracks\x18\x08 \x01(\x08:\x04true\"\xf6\x01\n\x1aGetPlaylistEntriesResponse\x12?\n\rresponse_code\x18\x01 \x02(\x0e\x32(.GetPlaylistEntriesResponse.ResponseCode\x12&\n\x0eplaylist_entry\x18\x02 \x03(\x0b\x32\x0e.PlaylistEntry\x12\x1f\n\x17\x65stimated_total_results\x18\x03 \x01(\x03\x12\x1a\n\x12\x63ontinuation_token\x18\x04 \x01(\t\"2\n\x0cResponseCode\x12\x06\n\x02OK\x10\x01\x12\x10\n\x0cNOT_MODIFIED\x10\x02\x12\x08\n\x04GONE\x10\x03\"\xc8\x01\n\x11PlaylistSortOrder\x12\x37\n\tattribute\x18\x01 \x01(\x0e\x32$.PlaylistSortOrder.PlaylistAttribute\x12\x19\n\ndescending\x18\x02 \x01(\x08:\x05\x66\x61lse\"_\n\x11PlaylistAttribute\x12\x16\n\x12LAST_MODIFIED_TIME\x10\x01\x12\t\n\x05TITLE\x10\x02\x12\x11\n\rCREATION_TIME\x10\x03\x12\x14\n\x10RECENT_TIMESTAMP\x10\x04\"\xad\x01\n\x13GetPlaylistsRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12\x13\n\x0bupdated_min\x18\x02 \x01(\x03\x12\x17\n\x0finclude_deleted\x18\x03 
\x01(\x08\x12\x13\n\x0bmax_results\x18\x04 \x01(\x05\x12\x1a\n\x12\x63ontinuation_token\x18\x05 \x01(\t\x12&\n\nsort_order\x18\x06 \x01(\x0b\x32\x12.PlaylistSortOrder\"\xdf\x01\n\x14GetPlaylistsResponse\x12\x39\n\rresponse_code\x18\x01 \x02(\x0e\x32\".GetPlaylistsResponse.ResponseCode\x12\x1b\n\x08playlist\x18\x02 \x03(\x0b\x32\t.Playlist\x12\x1f\n\x17\x65stimated_total_results\x18\x03 \x01(\x03\x12\x1a\n\x12\x63ontinuation_token\x18\x04 \x01(\t\"2\n\x0cResponseCode\x12\x06\n\x02OK\x10\x01\x12\x10\n\x0cNOT_MODIFIED\x10\x02\x12\x08\n\x04GONE\x10\x03\"3\n\x12LookupTrackRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tclient_id\x18\x02 \x01(\t\";\n\x1aLookupPlaylistEntryRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tclient_id\x18\x02 \x01(\t\"6\n\x15LookupPlaylistRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tclient_id\x18\x02 \x01(\t\"\xbe\x02\n\x12\x42\x61tchLookupRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12\"\n\x05track\x18\x02 \x03(\x0b\x32\x13.LookupTrackRequest\x12(\n\x08playlist\x18\x03 \x03(\x0b\x32\x16.LookupPlaylistRequest\x12\x37\n\rmetadata_type\x18\x04 \x01(\x0e\x32 .BatchLookupRequest.MetadataType\x12\x33\n\x0eplaylist_entry\x18\x05 \x03(\x0b\x32\x1b.LookupPlaylistEntryRequest\x12\x1e\n\x0finclude_deleted\x18\x06 \x01(\x08:\x05\x66\x61lse\";\n\x0cMetadataType\x12\t\n\x05TRACK\x10\x01\x12\x0c\n\x08PLAYLIST\x10\x02\x12\x12\n\x0ePLAYLIST_ENTRY\x10\x03\"q\n\x13\x42\x61tchLookupResponse\x12\x15\n\x05track\x18\x01 \x03(\x0b\x32\x06.Track\x12\x1b\n\x08playlist\x18\x02 \x03(\x0b\x32\t.Playlist\x12&\n\x0eplaylist_entry\x18\x03 \x03(\x0b\x32\x0e.PlaylistEntry\"\xba\x01\n\x12MutateTrackRequest\x12\x1c\n\x0c\x63reate_track\x18\x01 \x01(\x0b\x32\x06.Track\x12\x1c\n\x0cupdate_track\x18\x02 \x01(\x0b\x32\x06.Track\x12\x14\n\x0c\x64\x65lete_track\x18\x03 \x01(\t\x12\x16\n\x0epartial_update\x18\x04 \x01(\x08\x12\"\n\x14update_last_modified\x18\x05 \x01(\x08:\x04true\x12\x16\n\x0eundelete_track\x18\x06 
\x01(\t\"\xb6\x03\n\x0eMutateResponse\x12\x39\n\rresponse_code\x18\x01 \x01(\x0e\x32\".MutateResponse.MutateResponseCode\x12\n\n\x02id\x18\x02 \x01(\t\x12\x10\n\x08\x63hild_id\x18\x03 \x03(\t\x12\x11\n\tclient_id\x18\x04 \x01(\t\x12?\n\x13\x61vailability_status\x18\x05 \x01(\x0e\x32\".MutateResponse.AvailabilityStatus\x12\x15\n\rerror_message\x18\x06 \x01(\t\"W\n\x12MutateResponseCode\x12\x06\n\x02OK\x10\x01\x12\x0c\n\x08\x43ONFLICT\x10\x02\x12\x13\n\x0fINVALID_REQUEST\x10\x03\x12\x16\n\x12METADATA_TOO_LARGE\x10\x04\"\x86\x01\n\x12\x41vailabilityStatus\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07MATCHED\x10\x02\x12\x14\n\x10UPLOAD_REQUESTED\x10\x03\x12\r\n\tAVAILABLE\x10\x04\x12\x12\n\x0e\x46ORCE_REUPLOAD\x10\x05\x12\x1d\n\x19UPLOAD_PERMANENTLY_FAILED\x10\x06\"\xcd\x01\n\x18\x42\x61tchMutateTracksRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12+\n\x0etrack_mutation\x18\x02 \x03(\x0b\x32\x13.MutateTrackRequest\x12\x1f\n\x11send_notification\x18\x03 \x01(\x08:\x04true\x12\'\n\x19\x64\x65tect_timestamp_conflict\x18\x04 \x01(\x08:\x04true\x12)\n\x1bnotify_fine_grained_updates\x18\x05 \x01(\x08:\x04true\"\xcd\x01\n\x19\x42\x61tchMutateTracksResponse\x12O\n\rresponse_code\x18\x01 \x03(\x0e\x32\x38.BatchMutateTracksResponse.BatchMutateTracksResponseCode\x12(\n\x0fmutate_response\x18\x02 \x03(\x0b\x32\x0f.MutateResponse\"5\n\x1d\x42\x61tchMutateTracksResponseCode\x12\x06\n\x02OK\x10\x01\x12\x0c\n\x08\x43ONFLICT\x10\x02\"\xf7\x01\n\x15MutatePlaylistRequest\x12\"\n\x0f\x63reate_playlist\x18\x01 \x01(\x0b\x32\t.Playlist\x12\"\n\x0fupdate_playlist\x18\x02 \x01(\x0b\x32\t.Playlist\x12\x17\n\x0f\x64\x65lete_playlist\x18\x03 \x01(\t\x12\x16\n\x0epartial_update\x18\x04 \x01(\x08\x12&\n\x0eplaylist_entry\x18\x05 \x03(\x0b\x32\x0e.PlaylistEntry\x12\"\n\x14update_last_modified\x18\x06 \x01(\x08:\x04true\x12\x19\n\x11undelete_playlist\x18\x07 \x01(\t\"\xd7\x01\n\x1b\x42\x61tchMutatePlaylistsRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12\x31\n\x11playlist_mutation\x18\x02 
\x03(\x0b\x32\x16.MutatePlaylistRequest\x12\x1f\n\x11send_notification\x18\x03 \x01(\x08:\x04true\x12\'\n\x19\x64\x65tect_timestamp_conflict\x18\x04 \x01(\x08:\x04true\x12*\n\x1bnotify_fine_grained_updates\x18\x05 \x01(\x08:\x05\x66\x61lse\"\xd9\x01\n\x1c\x42\x61tchMutatePlaylistsResponse\x12U\n\rresponse_code\x18\x01 \x03(\x0e\x32>.BatchMutatePlaylistsResponse.BatchMutatePlaylistsResponseCode\x12(\n\x0fmutate_response\x18\x02 \x03(\x0b\x32\x0f.MutateResponse\"8\n BatchMutatePlaylistsResponseCode\x12\x06\n\x02OK\x10\x01\x12\x0c\n\x08\x43ONFLICT\x10\x02\"\xee\x01\n\x1aMutatePlaylistEntryRequest\x12-\n\x15\x63reate_playlist_entry\x18\x01 \x01(\x0b\x32\x0e.PlaylistEntry\x12-\n\x15update_playlist_entry\x18\x02 \x01(\x0b\x32\x0e.PlaylistEntry\x12-\n\x15\x64\x65lete_playlist_entry\x18\x03 \x01(\x0b\x32\x0e.PlaylistEntry\x12\"\n\x14update_last_modified\x18\x04 \x01(\x08:\x04true\x12\x1f\n\x17undelete_playlist_entry\x18\x05 \x01(\t\"\xe8\x01\n!BatchMutatePlaylistEntriesRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12<\n\x17playlist_entry_mutation\x18\x02 \x03(\x0b\x32\x1b.MutatePlaylistEntryRequest\x12\x1f\n\x11send_notification\x18\x03 \x01(\x08:\x04true\x12\'\n\x19\x64\x65tect_timestamp_conflict\x18\x04 \x01(\x08:\x04true\x12*\n\x1bnotify_fine_grained_updates\x18\x05 \x01(\x08:\x05\x66\x61lse\"\xf1\x01\n\"BatchMutatePlaylistEntriesResponse\x12\x61\n\rresponse_code\x18\x01 \x03(\x0e\x32J.BatchMutatePlaylistEntriesResponse.BatchMutatePlaylistEntriesResponseCode\x12(\n\x0fmutate_response\x18\x02 \x03(\x0b\x32\x0f.MutateResponse\">\n&BatchMutatePlaylistEntriesResponseCode\x12\x06\n\x02OK\x10\x01\x12\x0c\n\x08\x43ONFLICT\x10\x02\"\x90\x01\n\x11MagicPlaylistSeed\x12.\n\tseed_type\x18\x01 \x02(\x0e\x32\x1b.MagicPlaylistSeed.SeedType\x12\x0c\n\x04seed\x18\x02 \x02(\t\"=\n\x08SeedType\x12\t\n\x05TRACK\x10\x00\x12\n\n\x06\x41RTIST\x10\x01\x12\t\n\x05\x41LBUM\x10\x02\x12\x0f\n\x0bOPAQUE_SEED\x10\x03\"\xd1\x01\n\x14MagicPlaylistRequest\x12\x0f\n\x07gaia_id\x18\x01 
\x02(\x03\x12\x15\n\rplaylist_name\x18\x02 \x01(\t\x12\x13\n\x0bplaylist_id\x18\x03 \x01(\t\x12 \n\x04seed\x18\x04 \x03(\x0b\x32\x12.MagicPlaylistSeed\x12\x1b\n\x13num_recommendations\x18\x05 \x01(\x05\x12)\n\x1ainclude_all_track_metadata\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x12\n\nmodel_name\x18\x07 \x01(\t\"\\\n\x15MagicPlaylistResponse\x12\x1b\n\x08playlist\x18\x01 \x01(\x0b\x32\t.Playlist\x12&\n\x0eplaylist_entry\x18\x02 \x03(\x0b\x32\x0e.PlaylistEntry\"\xf8\x01\n\x12\x46lushLockerRequest\x12\x0f\n\x07gaia_id\x18\x01 \x01(\x03\x12\x13\n\x0bgaia_cookie\x18\x02 \x01(\t\x12#\n\x15remove_audio_binaries\x18\x03 \x01(\x08:\x04true\x12#\n\x15remove_image_binaries\x18\x04 \x01(\x08:\x04true\x12\x1f\n\x11send_notification\x18\x05 \x01(\x08:\x04true\x12&\n\x17reset_subscription_type\x18\x06 \x01(\x08:\x05\x66\x61lse\x12)\n\x1bnotify_fine_grained_updates\x18\x08 \x01(\x08:\x04true\"\x8a\x01\n\x13\x46lushLockerResponse\x12\x16\n\x0etracks_removed\x18\x01 \x01(\x05\x12\x17\n\x0f\x65ntries_removed\x18\x02 \x01(\x05\x12\x19\n\x11playlists_removed\x18\x03 \x01(\x05\x12\'\n\x1fsuccess_reset_subscription_type\x18\x04 \x01(\x08\"6\n\x12LockerNotification\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\"\xd6\x01\n\x05\x41lbum\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61lbum_artist\x18\x02 \x01(\t\x12\x1c\n\talbum_art\x18\x03 \x01(\x0b\x32\t.ImageRef\x12\x13\n\x0btrack_count\x18\x04 \x01(\x05\x12\x18\n\x10last_time_played\x18\x05 \x01(\x03\x12\x16\n\x0eis_compilation\x18\x06 \x01(\x08\x12\x18\n\x10\x61lbum_metajam_id\x18\x07 \x01(\t\x12\x1a\n\x12\x63reation_timestamp\x18\x08 \x01(\x03\x12\x0e\n\x06\x61rtist\x18\t \x01(\t\"\xa3\x01\n\x0e\x41lbumSortOrder\x12\x31\n\tattribute\x18\x01 \x01(\x0e\x32\x1e.AlbumSortOrder.AlbumAttribute\x12\x19\n\ndescending\x18\x02 
\x01(\x08:\x05\x66\x61lse\"C\n\x0e\x41lbumAttribute\x12\x14\n\x10LAST_PLAYED_TIME\x10\x01\x12\x08\n\x04NAME\x10\x02\x12\x11\n\rCREATION_TIME\x10\x03\"]\n\x10GetAlbumsRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12#\n\nsort_order\x18\x02 \x01(\x0b\x32\x0f.AlbumSortOrder\x12\x13\n\x0bmax_results\x18\x03 \x01(\x05\"*\n\x11GetAlbumsResponse\x12\x15\n\x05\x61lbum\x18\x01 \x03(\x0b\x32\x06.Album\"H\n\x06\x41rtist\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11total_track_count\x18\x02 \x01(\x05\x12\x15\n\x05\x61lbum\x18\x03 \x03(\x0b\x32\x06.Album\",\n\x0f\x41rtistSortOrder\x12\x19\n\ndescending\x18\x02 \x01(\x08:\x05\x66\x61lse\"_\n\x11GetArtistsRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12$\n\nsort_order\x18\x02 \x01(\x0b\x32\x10.ArtistSortOrder\x12\x13\n\x0bmax_results\x18\x03 \x01(\x05\"-\n\x12GetArtistsResponse\x12\x17\n\x06\x61rtist\x18\x01 \x03(\x0b\x32\x07.Artist\"L\n\nMusicGenre\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11total_track_count\x18\x02 \x01(\x05\x12\x15\n\x05\x61lbum\x18\x03 \x03(\x0b\x32\x06.Album\"+\n\x0eGenreSortOrder\x12\x19\n\ndescending\x18\x02 \x01(\x08:\x05\x66\x61lse\"]\n\x10GetGenresRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12#\n\nsort_order\x18\x02 \x01(\x0b\x32\x0f.GenreSortOrder\x12\x13\n\x0bmax_results\x18\x03 \x01(\x05\"/\n\x11GetGenresResponse\x12\x1a\n\x05genre\x18\x01 \x03(\x0b\x32\x0b.MusicGenre\"\xe6\x02\n GetDynamicPlaylistEntriesRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12[\n\x15playlist_entries_type\x18\x04 \x02(\x0e\x32<.GetDynamicPlaylistEntriesRequest.DynamicPlaylistEntriesType\x12\x13\n\x0bmax_results\x18\x02 \x01(\x05\x12\x1a\n\x12\x63ontinuation_token\x18\x03 \x01(\t\x12)\n\x1ainclude_all_track_metadata\x18\x05 
\x01(\x08:\x05\x66\x61lse\"x\n\x1a\x44ynamicPlaylistEntriesType\x12\r\n\tPURCHASED\x10\x01\x12\r\n\tTHUMBS_UP\x10\x02\x12\x12\n\x0eRECENTLY_ADDED\x10\x03\x12\x0c\n\x08PROMOTED\x10\x04\x12\x1a\n\x16PROMOTED_AND_PURCHASED\x10\x05\"\xda\x03\n!GetDynamicPlaylistEntriesResponse\x12\x46\n\rresponse_code\x18\x01 \x02(\x0e\x32/.GetDynamicPlaylistEntriesResponse.ResponseCode\x12&\n\x0eplaylist_entry\x18\x02 \x03(\x0b\x32\x0e.PlaylistEntry\x12\x1f\n\x17\x65stimated_total_results\x18\x03 \x01(\x03\x12\x1a\n\x12\x63ontinuation_token\x18\x04 \x01(\t\x12\\\n\x15playlist_entries_type\x18\x05 \x01(\x0e\x32=.GetDynamicPlaylistEntriesResponse.DynamicPlaylistEntriesType\"\x85\x01\n\x1a\x44ynamicPlaylistEntriesType\x12\r\n\tPURCHASED\x10\x01\x12\r\n\tTHUMBS_UP\x10\x02\x12\x12\n\x0eRECENTLY_ADDED\x10\x03\x12\x0c\n\x08PROMOTED\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\x12\x1a\n\x16PROMOTED_AND_PURCHASED\x10\x06\"\"\n\x0cResponseCode\x12\x06\n\x02OK\x10\x01\x12\n\n\x06NOT_OK\x10\x02\"4\n!GetAggregationsByTrackTypeRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\"\xea\x01\n\x12TrackTypeAggregate\x12\x37\n\x10track_type_value\x18\x01 \x01(\x0e\x32\x1d.TrackTypeAggregate.TrackType\x12\r\n\x05\x63ount\x18\x02 \x01(\x05\"\x8b\x01\n\tTrackType\x12\x11\n\rMATCHED_TRACK\x10\x01\x12\x13\n\x0fUNMATCHED_TRACK\x10\x02\x12\x0f\n\x0bLOCAL_TRACK\x10\x03\x12\x13\n\x0fPURCHASED_TRACK\x10\x04\x12\x1f\n\x1bMETADATA_ONLY_MATCHED_TRACK\x10\x05\x12\x0f\n\x0bPROMO_TRACK\x10\x06\"W\n\"GetAggregationsByTrackTypeResponse\x12\x31\n\x14track_type_aggregate\x18\x01 \x03(\x0b\x32\x13.TrackTypeAggregate\"=\n*GetAggregationsByAvailabilityStatusRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\"\x83\x02\n\x1b\x41vailabilityStatusAggregate\x12L\n\x13\x61vailability_status\x18\x01 \x01(\x0e\x32/.AvailabilityStatusAggregate.AvailabilityStatus\x12\r\n\x05\x63ount\x18\x02 
\x01(\x05\"\x86\x01\n\x12\x41vailabilityStatus\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07MATCHED\x10\x02\x12\x14\n\x10UPLOAD_REQUESTED\x10\x03\x12\r\n\tAVAILABLE\x10\x04\x12\x12\n\x0e\x46ORCE_REUPLOAD\x10\x05\x12\x1d\n\x19UPLOAD_PERMANENTLY_FAILED\x10\x06\"r\n+GetAggregationsByAvailabilityStatusResponse\x12\x43\n\x1d\x61vailability_status_aggregate\x18\x01 \x03(\x0b\x32\x1c.AvailabilityStatusAggregate\"7\n\x15\x41\x64\x64PromoTracksRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12\r\n\x05genre\x18\x02 \x03(\t\"/\n\x16\x41\x64\x64PromoTracksResponse\x12\x15\n\x05track\x18\x01 \x03(\x0b\x32\x06.Track\"J\n\x1eGetPlaylistAggregationsRequest\x12\x0f\n\x07gaia_id\x18\x01 \x02(\x03\x12\x17\n\x0bmax_results\x18\x02 \x01(\x05:\x02\x31\x34\"\x83\x01\n\x11PlaylistAggregate\x12\x13\n\x0bplaylist_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x1c\n\talbum_art\x18\x03 \x01(\x0b\x32\t.ImageRef\x12\x13\n\x0btrack_count\x18\x04 \x01(\x03\x12\x18\n\x10last_time_played\x18\x05 \x01(\x03\"Q\n\x1fGetPlaylistAggregationsResponse\x12.\n\x12playlist_aggregate\x18\x01 \x03(\x0b\x32\x12.PlaylistAggregate\"?\n\x1bRemoteControlCommandRequest\x12\x0f\n\x07gaia_id\x18\x01 \x01(\x03\x12\x0f\n\x07\x63ommand\x18\x02 \x01(\t\"\xb3\x01\n\x1cRemoteControlCommandResponse\x12\x41\n\rresponse_code\x18\x01 \x01(\x0e\x32*.RemoteControlCommandResponse.ResponseCode\"P\n\x0cResponseCode\x12\x06\n\x02OK\x10\x01\x12\x10\n\x0cNO_PUBLISHER\x10\x02\x12\x13\n\x0fINVALID_REQUEST\x10\x03\x12\x11\n\rPUBLISH_ERROR\x10\x04')
# Enum descriptor for AudioRef.Store (BLOBSTORE=1, SM_V2=2).
# Machine-generated by protoc; serialized_start/end must stay in sync with
# DESCRIPTOR.serialized_pb -- regenerate from locker.proto, do not hand-edit.
_AUDIOREF_STORE = descriptor.EnumDescriptor(
  name='Store',
  full_name='AudioRef.Store',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='BLOBSTORE', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='SM_V2', index=1, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=244,
  serialized_end=277,
)
# Enum descriptor for ImageRef.Store (single value: SHOEBOX=3).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_IMAGEREF_STORE = descriptor.EnumDescriptor(
  name='Store',
  full_name='ImageRef.Store',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='SHOEBOX', index=0, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=434,
  serialized_end=454,
)
# Enum descriptor for ImageRef.Origin (PERSONAL=1, STORE=2).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_IMAGEREF_ORIGIN = descriptor.EnumDescriptor(
  name='Origin',
  full_name='ImageRef.Origin',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='PERSONAL', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='STORE', index=1, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=456,
  serialized_end=489,
)
# Enum descriptor for Track.AvailabilityStatus: the server-side sync state of
# a locker track (PENDING=1 through UPLOAD_PERMANENTLY_FAILED=6).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_TRACK_AVAILABILITYSTATUS = descriptor.EnumDescriptor(
  name='AvailabilityStatus',
  full_name='Track.AvailabilityStatus',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='PENDING', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='MATCHED', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='UPLOAD_REQUESTED', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='AVAILABLE', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='FORCE_REUPLOAD', index=4, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='UPLOAD_PERMANENTLY_FAILED', index=5, number=6,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2101,
  serialized_end=2235,
)
# Enum descriptor for Track.ContentType: audio container/codec of the track
# (MP3=1, M4A=2, AAC=3, FLAC=4, OGG=5, WMA=6, M4P=7, ALAC=8).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_TRACK_CONTENTTYPE = descriptor.EnumDescriptor(
  name='ContentType',
  full_name='Track.ContentType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='MP3', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='M4A', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='AAC', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='FLAC', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='OGG', index=4, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='WMA', index=5, number=6,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='M4P', index=6, number=7,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ALAC', index=7, number=8,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2237,
  serialized_end=2324,
)
# Enum descriptor for Track.Channels (MONO=1, STEREO=2).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_TRACK_CHANNELS = descriptor.EnumDescriptor(
  name='Channels',
  full_name='Track.Channels',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='MONO', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='STEREO', index=1, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2326,
  serialized_end=2358,
)
# Enum descriptor for Track.TrackType: provenance of a locker track
# (MATCHED_TRACK=1 through PROMO_TRACK=6).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_TRACK_TRACKTYPE = descriptor.EnumDescriptor(
  name='TrackType',
  full_name='Track.TrackType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='MATCHED_TRACK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='UNMATCHED_TRACK', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='LOCAL_TRACK', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PURCHASED_TRACK', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='METADATA_ONLY_MATCHED_TRACK', index=4, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PROMO_TRACK', index=5, number=6,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2361,
  serialized_end=2500,
)
# Enum descriptor for Track.Rating: 5-star user rating scale; note the wire
# numbers are offset by one (NOT_RATED=1, ONE_STAR=2, ... FIVE_STARS=6).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_TRACK_RATING = descriptor.EnumDescriptor(
  name='Rating',
  full_name='Track.Rating',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='NOT_RATED', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ONE_STAR', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='TWO_STARS', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='THREE_STARS', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='FOUR_STARS', index=4, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='FIVE_STARS', index=5, number=6,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2502,
  serialized_end=2603,
)
# Enum descriptor for Playlist.PlaylistType
# (USER_GENERATED=1, MAGIC=2, PROMO=3).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_PLAYLIST_PLAYLISTTYPE = descriptor.EnumDescriptor(
  name='PlaylistType',
  full_name='Playlist.PlaylistType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='USER_GENERATED', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='MAGIC', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PROMO', index=2, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2891,
  serialized_end=2947,
)
# Enum descriptor for PlaylistEntry.RelativePositionIdType: whether the
# place_before/after entry ids are SERVER(=1) or CLIENT(=2) ids.
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_PLAYLISTENTRY_RELATIVEPOSITIONIDTYPE = descriptor.EnumDescriptor(
  name='RelativePositionIdType',
  full_name='PlaylistEntry.RelativePositionIdType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='SERVER', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='CLIENT', index=1, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=3332,
  serialized_end=3380,
)
# Enum descriptor for TrackSearchRestriction.TrackAttribute: which Track
# field a search restriction filters on (TITLE=1 ... ALBUM_METAJAM_ID=10).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_TRACKSEARCHRESTRICTION_TRACKATTRIBUTE = descriptor.EnumDescriptor(
  name='TrackAttribute',
  full_name='TrackSearchRestriction.TrackAttribute',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='TITLE', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ARTIST', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ALBUM', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ALBUM_ARTIST', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='GENRE', index=4, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='AVAILABILITY_STATUS', index=5, number=6,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='TRACK_TYPE', index=6, number=7,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='YEAR', index=7, number=8,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='STORE_ID', index=8, number=9,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ALBUM_METAJAM_ID', index=9, number=10,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=3549,
  serialized_end=3715,
)
# Enum descriptor for TrackSearchRestriction.ComparisonType: the comparison
# operator of a restriction; zero-based wire numbers (EQUAL=0 ...
# PARTIAL_MATCH=6), unlike most other enums in this file which start at 1.
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_TRACKSEARCHRESTRICTION_COMPARISONTYPE = descriptor.EnumDescriptor(
  name='ComparisonType',
  full_name='TrackSearchRestriction.ComparisonType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='EQUAL', index=0, number=0,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='NOT_EQUAL', index=1, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='GREATER_THAN', index=2, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='GREATER_EQUAL', index=3, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='LESS_THAN', index=4, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='LESS_EQUAL', index=5, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PARTIAL_MATCH', index=6, number=6,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=3718,
  serialized_end=3847,
)
# Enum descriptor for TrackSearchRestrictionSet.RestrictionSetType: boolean
# combinator for a restriction set (AND=0, OR=1; zero-based).
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_TRACKSEARCHRESTRICTIONSET_RESTRICTIONSETTYPE = descriptor.EnumDescriptor(
  name='RestrictionSetType',
  full_name='TrackSearchRestrictionSet.RestrictionSetType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='AND', index=0, number=0,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='OR', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=4031,
  serialized_end=4068,
)
# Enum descriptor for TrackSortOrder.TrackAttribute: sortable Track fields.
# Note the wire numbers are non-contiguous (5, 7 and 8 are absent), so
# index and number diverge from TRACK_NUMBER(=6) onward.
# Machine-generated by protoc; regenerate from locker.proto, do not hand-edit.
_TRACKSORTORDER_TRACKATTRIBUTE = descriptor.EnumDescriptor(
  name='TrackAttribute',
  full_name='TrackSortOrder.TrackAttribute',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='LAST_MODIFIED_TIME', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ARTIST', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ALBUM', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='TITLE', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='TRACK_NUMBER', index=4, number=6,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PLAY_COUNT', index=5, number=9,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='DURATION_MILLIS', index=6, number=10,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='RATING', index=7, number=11,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='CREATION_TIME', index=8, number=12,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=4167,
  serialized_end=4327,
)
_GETTRACKSREQUEST_TRACKPROJECTION = descriptor.EnumDescriptor(
name='TrackProjection',
full_name='GetTracksRequest.TrackProjection',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='FULL', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='FRONTEND_VIEW', index=1, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4666,
serialized_end=4712,
)
# Enum: status code carried in a GetTracksResponse.
_GETTRACKSRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
  name='ResponseCode',
  full_name='GetTracksResponse.ResponseCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='OK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='NOT_MODIFIED', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='GONE', index=2, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=4876,
  serialized_end=4926,
  )
# Enum: status code carried in a GetPlaylistEntriesResponse.
# NOTE(review): serialized_start/end (4876/4926) duplicate the range of
# GetTracksResponse.ResponseCode above; genuine protoc output gives each
# enum a distinct byte range -- verify against a fresh regeneration.
_GETPLAYLISTENTRIESRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
  name='ResponseCode',
  full_name='GetPlaylistEntriesResponse.ResponseCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='OK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='NOT_MODIFIED', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='GONE', index=2, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=4876,
  serialized_end=4926,
  )
# Enum: playlist attributes a PlaylistSortOrder can sort by.
_PLAYLISTSORTORDER_PLAYLISTATTRIBUTE = descriptor.EnumDescriptor(
  name='PlaylistAttribute',
  full_name='PlaylistSortOrder.PlaylistAttribute',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='LAST_MODIFIED_TIME', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='TITLE', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='CREATION_TIME', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='RECENT_TIMESTAMP', index=3, number=4,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=5538,
  serialized_end=5633,
  )
# Enum: status code carried in a GetPlaylistsResponse.
# NOTE(review): same duplicated serialized range (4876/4926) as above --
# confirm against regenerated output.
_GETPLAYLISTSRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
  name='ResponseCode',
  full_name='GetPlaylistsResponse.ResponseCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='OK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='NOT_MODIFIED', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='GONE', index=2, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=4876,
  serialized_end=4926,
  )
# Enum: kind of metadata a BatchLookupRequest asks for.
_BATCHLOOKUPREQUEST_METADATATYPE = descriptor.EnumDescriptor(
  name='MetadataType',
  full_name='BatchLookupRequest.MetadataType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='TRACK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PLAYLIST', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PLAYLIST_ENTRY', index=2, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=6467,
  serialized_end=6526,
  )
# Enum: per-item result code inside a MutateResponse.
_MUTATERESPONSE_MUTATERESPONSECODE = descriptor.EnumDescriptor(
  name='MutateResponseCode',
  full_name='MutateResponse.MutateResponseCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='OK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='CONFLICT', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='INVALID_REQUEST', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='METADATA_TOO_LARGE', index=3, number=4,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=7047,
  serialized_end=7134,
  )
# Enum: server-side availability state of a track after a mutation.
# NOTE(review): serialized range 2101/2235 is shared with
# AvailabilityStatusAggregate.AvailabilityStatus later in this file --
# verify against regenerated output.
_MUTATERESPONSE_AVAILABILITYSTATUS = descriptor.EnumDescriptor(
  name='AvailabilityStatus',
  full_name='MutateResponse.AvailabilityStatus',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='PENDING', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='MATCHED', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='UPLOAD_REQUESTED', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='AVAILABLE', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='FORCE_REUPLOAD', index=4, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='UPLOAD_PERMANENTLY_FAILED', index=5, number=6,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2101,
  serialized_end=2235,
  )
# Enum: overall result of a BatchMutateTracks call.
_BATCHMUTATETRACKSRESPONSE_BATCHMUTATETRACKSRESPONSECODE = descriptor.EnumDescriptor(
  name='BatchMutateTracksResponseCode',
  full_name='BatchMutateTracksResponse.BatchMutateTracksResponseCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='OK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='CONFLICT', index=1, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=7634,
  serialized_end=7687,
  )
# Enum: overall result of a BatchMutatePlaylists call.
_BATCHMUTATEPLAYLISTSRESPONSE_BATCHMUTATEPLAYLISTSRESPONSECODE = descriptor.EnumDescriptor(
  name='BatchMutatePlaylistsResponseCode',
  full_name='BatchMutatePlaylistsResponse.BatchMutatePlaylistsResponseCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='OK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='CONFLICT', index=1, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=8319,
  serialized_end=8375,
  )
# Enum: overall result of a BatchMutatePlaylistEntries call.
_BATCHMUTATEPLAYLISTENTRIESRESPONSE_BATCHMUTATEPLAYLISTENTRIESRESPONSECODE = descriptor.EnumDescriptor(
  name='BatchMutatePlaylistEntriesResponseCode',
  full_name='BatchMutatePlaylistEntriesResponse.BatchMutatePlaylistEntriesResponseCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='OK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='CONFLICT', index=1, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=9033,
  serialized_end=9095,
  )
# Enum: the kind of entity a MagicPlaylistSeed is seeded from.
_MAGICPLAYLISTSEED_SEEDTYPE = descriptor.EnumDescriptor(
  name='SeedType',
  full_name='MagicPlaylistSeed.SeedType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='TRACK', index=0, number=0,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ARTIST', index=1, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='ALBUM', index=2, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='OPAQUE_SEED', index=3, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=9181,
  serialized_end=9242,
  )
# Enum: album attributes an AlbumSortOrder can sort by.
_ALBUMSORTORDER_ALBUMATTRIBUTE = descriptor.EnumDescriptor(
  name='AlbumAttribute',
  full_name='AlbumSortOrder.AlbumAttribute',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='LAST_PLAYED_TIME', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='NAME', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='CREATION_TIME', index=2, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=10312,
  serialized_end=10379,
  )
# Enum: which auto-generated playlist a GetDynamicPlaylistEntriesRequest
# targets.
_GETDYNAMICPLAYLISTENTRIESREQUEST_DYNAMICPLAYLISTENTRIESTYPE = descriptor.EnumDescriptor(
  name='DynamicPlaylistEntriesType',
  full_name='GetDynamicPlaylistEntriesRequest.DynamicPlaylistEntriesType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='PURCHASED', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='THUMBS_UP', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='RECENTLY_ADDED', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PROMOTED', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PROMOTED_AND_PURCHASED', index=4, number=5,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=11290,
  serialized_end=11410,
  )
# Enum: response-side playlist type.  Not symmetric with the request enum:
# it adds UNKNOWN at number 5, shifting PROMOTED_AND_PURCHASED to 6.
_GETDYNAMICPLAYLISTENTRIESRESPONSE_DYNAMICPLAYLISTENTRIESTYPE = descriptor.EnumDescriptor(
  name='DynamicPlaylistEntriesType',
  full_name='GetDynamicPlaylistEntriesResponse.DynamicPlaylistEntriesType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='PURCHASED', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='THUMBS_UP', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='RECENTLY_ADDED', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PROMOTED', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='UNKNOWN', index=4, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PROMOTED_AND_PURCHASED', index=5, number=6,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=11718,
  serialized_end=11851,
  )
# Enum: status code carried in a GetDynamicPlaylistEntriesResponse.
_GETDYNAMICPLAYLISTENTRIESRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
  name='ResponseCode',
  full_name='GetDynamicPlaylistEntriesResponse.ResponseCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='OK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='NOT_OK', index=1, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=11853,
  serialized_end=11887,
  )
# Enum: track-origin categories counted by a TrackTypeAggregate.
_TRACKTYPEAGGREGATE_TRACKTYPE = descriptor.EnumDescriptor(
  name='TrackType',
  full_name='TrackTypeAggregate.TrackType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='MATCHED_TRACK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='UNMATCHED_TRACK', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='LOCAL_TRACK', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PURCHASED_TRACK', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='METADATA_ONLY_MATCHED_TRACK', index=4, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PROMO_TRACK', index=5, number=6,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2361,
  serialized_end=2500,
  )
# Enum: availability states counted by an AvailabilityStatusAggregate.
# Value list mirrors MutateResponse.AvailabilityStatus, and the serialized
# range (2101/2235) is identical to that enum's -- NOTE(review): verify
# against regenerated output.
_AVAILABILITYSTATUSAGGREGATE_AVAILABILITYSTATUS = descriptor.EnumDescriptor(
  name='AvailabilityStatus',
  full_name='AvailabilityStatusAggregate.AvailabilityStatus',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='PENDING', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='MATCHED', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='UPLOAD_REQUESTED', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='AVAILABLE', index=3, number=4,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='FORCE_REUPLOAD', index=4, number=5,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='UPLOAD_PERMANENTLY_FAILED', index=5, number=6,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2101,
  serialized_end=2235,
  )
# Enum: status code carried in a RemoteControlCommandResponse.
_REMOTECONTROLCOMMANDRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
  name='ResponseCode',
  full_name='RemoteControlCommandResponse.ResponseCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    descriptor.EnumValueDescriptor(
      name='OK', index=0, number=1,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='NO_PUBLISHER', index=1, number=2,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='INVALID_REQUEST', index=2, number=3,
      options=None,
      type=None),
    descriptor.EnumValueDescriptor(
      name='PUBLISH_ERROR', index=3, number=4,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=13274,
  serialized_end=13354,
  )
# Message descriptor: AudioRef -- a reference to stored audio content.
# Field `type`/`cpp_type` integers are protobuf wire-type codes (e.g.
# type=9 string, type=12 bytes, type=14 enum, type=3 int64, type=8 bool,
# type=5 int32); label 1=optional, 2=required, 3=repeated.
# `unicode("", "utf-8")` defaults below confirm this is Python 2 codegen.
_AUDIOREF = descriptor.Descriptor(
  name='AudioRef',
  full_name='AudioRef',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='store', full_name='AudioRef.store', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='ref', full_name='AudioRef.ref', index=1,
      number=2, type=12, cpp_type=9, label=2,
      has_default_value=False, default_value="",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='url', full_name='AudioRef.url', index=2,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='bit_rate', full_name='AudioRef.bit_rate', index=3,
      number=5, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sample_rate', full_name='AudioRef.sample_rate', index=4,
      number=6, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='downloadable', full_name='AudioRef.downloadable', index=5,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='duration_millis', full_name='AudioRef.duration_millis', index=6,
      number=8, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='rematch_timestamp', full_name='AudioRef.rematch_timestamp', index=7,
      number=9, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='invalid_due_to_wipeout', full_name='AudioRef.invalid_due_to_wipeout', index=8,
      number=10, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _AUDIOREF_STORE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=29,
  serialized_end=277,
  )
# Message descriptor: ImageRef -- a reference to album art / image content,
# with nested Store and Origin enums (defined earlier in the file).
_IMAGEREF = descriptor.Descriptor(
  name='ImageRef',
  full_name='ImageRef',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='store', full_name='ImageRef.store', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=3,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='width', full_name='ImageRef.width', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='height', full_name='ImageRef.height', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='url', full_name='ImageRef.url', index=3,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='invalid_due_to_wipeout', full_name='ImageRef.invalid_due_to_wipeout', index=4,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='origin', full_name='ImageRef.origin', index=5,
      number=8, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _IMAGEREF_STORE,
    _IMAGEREF_ORIGIN,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=280,
  serialized_end=489,
  )
# Message descriptor: UploadedUitsId3Tag -- an uploaded UITS ID3 tag blob
# (owner string plus raw bytes payload).
_UPLOADEDUITSID3TAG = descriptor.Descriptor(
  name='UploadedUitsId3Tag',
  full_name='UploadedUitsId3Tag',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='owner', full_name='UploadedUitsId3Tag.owner', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='data', full_name='UploadedUitsId3Tag.data', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value="",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=491,
  serialized_end=540,
  )
_TRACK = descriptor.Descriptor(
name='Track',
full_name='Track',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='Track.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_id', full_name='Track.client_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='creation_timestamp', full_name='Track.creation_timestamp', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='last_modified_timestamp', full_name='Track.last_modified_timestamp', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deleted', full_name='Track.deleted', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='title', full_name='Track.title', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='artist', full_name='Track.artist', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='artist_hash', full_name='Track.artist_hash', index=7,
number=46, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='composer', full_name='Track.composer', index=8,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='album', full_name='Track.album', index=9,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='album_artist', full_name='Track.album_artist', index=10,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='canonical_album', full_name='Track.canonical_album', index=11,
number=56, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='canonical_artist', full_name='Track.canonical_artist', index=12,
number=57, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='canonical_genre_album', full_name='Track.canonical_genre_album', index=13,
number=58, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='year', full_name='Track.year', index=14,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='comment', full_name='Track.comment', index=15,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track_number', full_name='Track.track_number', index=16,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='genre', full_name='Track.genre', index=17,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='duration_millis', full_name='Track.duration_millis', index=18,
number=15, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='beats_per_minute', full_name='Track.beats_per_minute', index=19,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='original_bit_rate', full_name='Track.original_bit_rate', index=20,
number=44, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='audio_ref', full_name='Track.audio_ref', index=21,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='album_art_ref', full_name='Track.album_art_ref', index=22,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='availability_status', full_name='Track.availability_status', index=23,
number=19, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='play_count', full_name='Track.play_count', index=24,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='content_type', full_name='Track.content_type', index=25,
number=25, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='total_track_count', full_name='Track.total_track_count', index=26,
number=26, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disc_number', full_name='Track.disc_number', index=27,
number=27, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='total_disc_count', full_name='Track.total_disc_count', index=28,
number=28, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='channels', full_name='Track.channels', index=29,
number=29, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track_type', full_name='Track.track_type', index=30,
number=30, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='use_single_server_copy', full_name='Track.use_single_server_copy', index=31,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='rating', full_name='Track.rating', index=32,
number=31, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='estimated_size', full_name='Track.estimated_size', index=33,
number=32, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='store_id', full_name='Track.store_id', index=34,
number=33, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metajam_id', full_name='Track.metajam_id', index=35,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metajam_id_confidence', full_name='Track.metajam_id_confidence', index=36,
number=43, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uits', full_name='Track.uits', index=37,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uits_metadata', full_name='Track.uits_metadata', index=38,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='compilation', full_name='Track.compilation', index=39,
number=36, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_date_added', full_name='Track.client_date_added', index=40,
number=37, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='recent_timestamp', full_name='Track.recent_timestamp', index=41,
number=38, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='do_not_rematch', full_name='Track.do_not_rematch', index=42,
number=39, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='from_album_purchase', full_name='Track.from_album_purchase', index=43,
number=41, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='album_metajam_id', full_name='Track.album_metajam_id', index=44,
number=42, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='transaction_id', full_name='Track.transaction_id', index=45,
number=45, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='debug_track', full_name='Track.debug_track', index=46,
number=47, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_title', full_name='Track.normalized_title', index=47,
number=48, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_artist', full_name='Track.normalized_artist', index=48,
number=49, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_album', full_name='Track.normalized_album', index=49,
number=50, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_album_artist', full_name='Track.normalized_album_artist', index=50,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_canonical_album', full_name='Track.normalized_canonical_album', index=51,
number=54, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_canonical_artist', full_name='Track.normalized_canonical_artist', index=52,
number=55, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uploader_id', full_name='Track.uploader_id', index=53,
number=52, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_album_id', full_name='Track.client_album_id', index=54,
number=53, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label_owner_code', full_name='Track.label_owner_code', index=55,
number=60, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='original_content_type', full_name='Track.original_content_type', index=56,
number=61, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uploaded_uits', full_name='Track.uploaded_uits', index=57,
number=71, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_TRACK_AVAILABILITYSTATUS,
_TRACK_CONTENTTYPE,
_TRACK_CHANNELS,
_TRACK_TRACKTYPE,
_TRACK_RATING,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=543,
serialized_end=2603,
)
# Message descriptor for `Tracks`: a container holding a repeated `Track`
# field (field number 1, type=11/TYPE_MESSAGE, label=3/LABEL_REPEATED).
# NOTE: this is protoc-generated descriptor data -- the literal field numbers,
# type codes, and serialized byte offsets must match the compiled .proto;
# do not hand-edit them.
_TRACKS = descriptor.Descriptor(
  name='Tracks',
  full_name='Tracks',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='track', full_name='Tracks.track', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  # Byte range of this message's definition inside DESCRIPTOR's serialized file data.
  serialized_start=2605,
  serialized_end=2636,
  )
# Message descriptor for `Playlist`: server/client ids, creation and
# modification timestamps, a soft-delete flag, display name, a
# `playlist_type` enum (see _PLAYLIST_PLAYLISTTYPE, defined elsewhere in this
# module), an art-reference sub-message, and a recency timestamp.
# protoc-generated table -- literal values must match the compiled .proto.
_PLAYLIST = descriptor.Descriptor(
  name='Playlist',
  full_name='Playlist',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='id', full_name='Playlist.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='client_id', full_name='Playlist.client_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='creation_timestamp', full_name='Playlist.creation_timestamp', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='last_modified_timestamp', full_name='Playlist.last_modified_timestamp', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='deleted', full_name='Playlist.deleted', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='name', full_name='Playlist.name', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist_type', full_name='Playlist.playlist_type', index=6,
      number=7, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist_art_ref', full_name='Playlist.playlist_art_ref', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='recent_timestamp', full_name='Playlist.recent_timestamp', index=8,
      number=9, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _PLAYLIST_PLAYLISTTYPE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=2639,
  serialized_end=2947,
  )
# Message descriptor for `PlaylistEntry`: one track's membership in a
# playlist. Carries the owning playlist_id, both absolute and relative
# ordering info (absolute_position, place_after_entry_id,
# place_before_entry_id, string_position, plus the
# relative_position_id_type enum), the referenced track_id and an optional
# embedded `track` sub-message, entry ids, timestamps, and a soft-delete
# flag. protoc-generated table -- literals must match the compiled .proto.
_PLAYLISTENTRY = descriptor.Descriptor(
  name='PlaylistEntry',
  full_name='PlaylistEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='playlist_id', full_name='PlaylistEntry.playlist_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='absolute_position', full_name='PlaylistEntry.absolute_position', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='place_after_entry_id', full_name='PlaylistEntry.place_after_entry_id', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='track_id', full_name='PlaylistEntry.track_id', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='id', full_name='PlaylistEntry.id', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='client_id', full_name='PlaylistEntry.client_id', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='creation_timestamp', full_name='PlaylistEntry.creation_timestamp', index=6,
      number=7, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='last_modified_timestamp', full_name='PlaylistEntry.last_modified_timestamp', index=7,
      number=8, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='deleted', full_name='PlaylistEntry.deleted', index=8,
      number=9, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='relative_position_id_type', full_name='PlaylistEntry.relative_position_id_type', index=9,
      number=10, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='track', full_name='PlaylistEntry.track', index=10,
      number=15, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='place_before_entry_id', full_name='PlaylistEntry.place_before_entry_id', index=11,
      number=16, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='string_position', full_name='PlaylistEntry.string_position', index=12,
      number=17, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _PLAYLISTENTRY_RELATIVEPOSITIONIDTYPE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=2950,
  serialized_end=3380,
  )
# Message descriptor for `TrackSearchRestriction`: one search predicate made
# of a track attribute (enum), a string value, and a comparison-type enum.
# All three fields are label=2 (LABEL_REQUIRED). protoc-generated table --
# literals must match the compiled .proto.
_TRACKSEARCHRESTRICTION = descriptor.Descriptor(
  name='TrackSearchRestriction',
  full_name='TrackSearchRestriction',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='attribute', full_name='TrackSearchRestriction.attribute', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='value', full_name='TrackSearchRestriction.value', index=1,
      number=2, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='comparison_type', full_name='TrackSearchRestriction.comparison_type', index=2,
      number=3, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _TRACKSEARCHRESTRICTION_TRACKATTRIBUTE,
    _TRACKSEARCHRESTRICTION_COMPARISONTYPE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=3383,
  serialized_end=3847,
  )
# Message descriptor for `TrackSearchRestrictionSet`: a recursive boolean
# combination of restrictions -- a set `type` enum, repeated leaf
# `restriction` messages, and repeated nested `sub_set` messages (the
# recursion making arbitrary predicate trees expressible).
# protoc-generated table -- literals must match the compiled .proto.
_TRACKSEARCHRESTRICTIONSET = descriptor.Descriptor(
  name='TrackSearchRestrictionSet',
  full_name='TrackSearchRestrictionSet',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='type', full_name='TrackSearchRestrictionSet.type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='restriction', full_name='TrackSearchRestrictionSet.restriction', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sub_set', full_name='TrackSearchRestrictionSet.sub_set', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _TRACKSEARCHRESTRICTIONSET_RESTRICTIONSETTYPE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=3850,
  serialized_end=4068,
  )
# Message descriptor for `TrackSortOrder`: which track attribute to sort by
# (enum) and a `descending` bool that defaults to True.
# protoc-generated table -- literals must match the compiled .proto.
_TRACKSORTORDER = descriptor.Descriptor(
  name='TrackSortOrder',
  full_name='TrackSortOrder',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='attribute', full_name='TrackSortOrder.attribute', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='descending', full_name='TrackSortOrder.descending', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=True,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _TRACKSORTORDER_TRACKATTRIBUTE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=4071,
  serialized_end=4327,
  )
# Message descriptor for `GetTracksRequest`: a paged track query. Required
# `gaia_id` (label=2) identifies the account; the rest are optional paging
# (updated_min, max_results, continuation_token), filtering
# (include_deleted, repeated search_restriction, restriction_set), ordering
# (repeated sort_order), and a track_projection enum selecting result detail.
# protoc-generated table -- literals must match the compiled .proto.
_GETTRACKSREQUEST = descriptor.Descriptor(
  name='GetTracksRequest',
  full_name='GetTracksRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetTracksRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='updated_min', full_name='GetTracksRequest.updated_min', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='include_deleted', full_name='GetTracksRequest.include_deleted', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetTracksRequest.max_results', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetTracksRequest.continuation_token', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='search_restriction', full_name='GetTracksRequest.search_restriction', index=5,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sort_order', full_name='GetTracksRequest.sort_order', index=6,
      number=7, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='restriction_set', full_name='GetTracksRequest.restriction_set', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='track_projection', full_name='GetTracksRequest.track_projection', index=8,
      number=9, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETTRACKSREQUEST_TRACKPROJECTION,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=4330,
  serialized_end=4712,
  )
# Message descriptor for `GetTracksResponse`: one page of track query
# results -- a required response_code enum, the repeated `track` payload, an
# estimated total count, and a continuation_token for fetching the next page.
# protoc-generated table -- literals must match the compiled .proto.
_GETTRACKSRESPONSE = descriptor.Descriptor(
  name='GetTracksResponse',
  full_name='GetTracksResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='response_code', full_name='GetTracksResponse.response_code', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='track', full_name='GetTracksResponse.track', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='estimated_total_results', full_name='GetTracksResponse.estimated_total_results', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetTracksResponse.continuation_token', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETTRACKSRESPONSE_RESPONSECODE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=4715,
  serialized_end=4926,
  )
# Message descriptor for `GetPlaylistEntriesRequest`: a paged query for
# playlist entries. Required `gaia_id`; optional paging fields (updated_min,
# max_results, continuation_token), an include_deleted flag, a
# playlist_id_filter to restrict to one playlist, and two defaulted flags:
# include_all_track_metadata (default False) and only_show_available_tracks
# (default True). protoc-generated table -- literals must match the
# compiled .proto.
_GETPLAYLISTENTRIESREQUEST = descriptor.Descriptor(
  name='GetPlaylistEntriesRequest',
  full_name='GetPlaylistEntriesRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetPlaylistEntriesRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='updated_min', full_name='GetPlaylistEntriesRequest.updated_min', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='include_deleted', full_name='GetPlaylistEntriesRequest.include_deleted', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetPlaylistEntriesRequest.max_results', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetPlaylistEntriesRequest.continuation_token', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist_id_filter', full_name='GetPlaylistEntriesRequest.playlist_id_filter', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='include_all_track_metadata', full_name='GetPlaylistEntriesRequest.include_all_track_metadata', index=6,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='only_show_available_tracks', full_name='GetPlaylistEntriesRequest.only_show_available_tracks', index=7,
      number=8, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=True,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=4929,
  serialized_end=5181,
  )
# Message descriptor for `GetPlaylistEntriesResponse`: one page of playlist
# entries -- required response_code enum, repeated `playlist_entry` payload,
# estimated total count, and a continuation_token for the next page.
# protoc-generated table -- literals must match the compiled .proto.
_GETPLAYLISTENTRIESRESPONSE = descriptor.Descriptor(
  name='GetPlaylistEntriesResponse',
  full_name='GetPlaylistEntriesResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='response_code', full_name='GetPlaylistEntriesResponse.response_code', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist_entry', full_name='GetPlaylistEntriesResponse.playlist_entry', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='estimated_total_results', full_name='GetPlaylistEntriesResponse.estimated_total_results', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetPlaylistEntriesResponse.continuation_token', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETPLAYLISTENTRIESRESPONSE_RESPONSECODE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5184,
  serialized_end=5430,
  )
# Message descriptor for `PlaylistSortOrder`: which playlist attribute to
# sort by (enum) and a `descending` bool that defaults to False -- note this
# differs from TrackSortOrder, whose `descending` defaults to True.
# protoc-generated table -- literals must match the compiled .proto.
_PLAYLISTSORTORDER = descriptor.Descriptor(
  name='PlaylistSortOrder',
  full_name='PlaylistSortOrder',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='attribute', full_name='PlaylistSortOrder.attribute', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='descending', full_name='PlaylistSortOrder.descending', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _PLAYLISTSORTORDER_PLAYLISTATTRIBUTE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5433,
  serialized_end=5633,
  )
# Message descriptor for `GetPlaylistsRequest`: a paged playlist query.
# Required `gaia_id`; optional paging (updated_min, max_results,
# continuation_token), an include_deleted flag, and a single (label=1)
# sort_order sub-message -- unlike GetTracksRequest, where sort_order is
# repeated. protoc-generated table -- literals must match the compiled .proto.
_GETPLAYLISTSREQUEST = descriptor.Descriptor(
  name='GetPlaylistsRequest',
  full_name='GetPlaylistsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetPlaylistsRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='updated_min', full_name='GetPlaylistsRequest.updated_min', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='include_deleted', full_name='GetPlaylistsRequest.include_deleted', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetPlaylistsRequest.max_results', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetPlaylistsRequest.continuation_token', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sort_order', full_name='GetPlaylistsRequest.sort_order', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5636,
  serialized_end=5809,
  )
# Message descriptor for `GetPlaylistsResponse`: one page of playlists --
# required response_code enum, repeated `playlist` payload, estimated total
# count, and a continuation_token for the next page.
# protoc-generated table -- literals must match the compiled .proto.
_GETPLAYLISTSRESPONSE = descriptor.Descriptor(
  name='GetPlaylistsResponse',
  full_name='GetPlaylistsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='response_code', full_name='GetPlaylistsResponse.response_code', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist', full_name='GetPlaylistsResponse.playlist', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='estimated_total_results', full_name='GetPlaylistsResponse.estimated_total_results', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetPlaylistsResponse.continuation_token', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETPLAYLISTSRESPONSE_RESPONSECODE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5812,
  serialized_end=6035,
  )
# Message descriptor for `LookupTrackRequest`: identifies a single track for
# lookup by server `id` or client-assigned `client_id` (both optional
# strings). protoc-generated table -- literals must match the compiled .proto.
_LOOKUPTRACKREQUEST = descriptor.Descriptor(
  name='LookupTrackRequest',
  full_name='LookupTrackRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='id', full_name='LookupTrackRequest.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='client_id', full_name='LookupTrackRequest.client_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=6037,
  serialized_end=6088,
  )
# Message descriptor for `LookupPlaylistEntryRequest`: identifies a single
# playlist entry for lookup by server `id` or client-assigned `client_id`
# (same shape as LookupTrackRequest/LookupPlaylistRequest).
# protoc-generated table -- literals must match the compiled .proto.
_LOOKUPPLAYLISTENTRYREQUEST = descriptor.Descriptor(
  name='LookupPlaylistEntryRequest',
  full_name='LookupPlaylistEntryRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='id', full_name='LookupPlaylistEntryRequest.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='client_id', full_name='LookupPlaylistEntryRequest.client_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=6090,
  serialized_end=6149,
  )
# Message descriptor for `LookupPlaylistRequest`: identifies a single
# playlist for lookup by server `id` or client-assigned `client_id`.
# protoc-generated table -- literals must match the compiled .proto.
_LOOKUPPLAYLISTREQUEST = descriptor.Descriptor(
  name='LookupPlaylistRequest',
  full_name='LookupPlaylistRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='id', full_name='LookupPlaylistRequest.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='client_id', full_name='LookupPlaylistRequest.client_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=6151,
  serialized_end=6205,
  )
_BATCHLOOKUPREQUEST = descriptor.Descriptor(
name='BatchLookupRequest',
full_name='BatchLookupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='BatchLookupRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track', full_name='BatchLookupRequest.track', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist', full_name='BatchLookupRequest.playlist', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metadata_type', full_name='BatchLookupRequest.metadata_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry', full_name='BatchLookupRequest.playlist_entry', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='include_deleted', full_name='BatchLookupRequest.include_deleted', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BATCHLOOKUPREQUEST_METADATATYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6208,
serialized_end=6526,
)
_BATCHLOOKUPRESPONSE = descriptor.Descriptor(
name='BatchLookupResponse',
full_name='BatchLookupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='track', full_name='BatchLookupResponse.track', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist', full_name='BatchLookupResponse.playlist', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry', full_name='BatchLookupResponse.playlist_entry', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6528,
serialized_end=6641,
)
_MUTATETRACKREQUEST = descriptor.Descriptor(
name='MutateTrackRequest',
full_name='MutateTrackRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='create_track', full_name='MutateTrackRequest.create_track', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_track', full_name='MutateTrackRequest.update_track', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='delete_track', full_name='MutateTrackRequest.delete_track', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='partial_update', full_name='MutateTrackRequest.partial_update', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_last_modified', full_name='MutateTrackRequest.update_last_modified', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='undelete_track', full_name='MutateTrackRequest.undelete_track', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6644,
serialized_end=6830,
)
_MUTATERESPONSE = descriptor.Descriptor(
name='MutateResponse',
full_name='MutateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='MutateResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='id', full_name='MutateResponse.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='child_id', full_name='MutateResponse.child_id', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_id', full_name='MutateResponse.client_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='availability_status', full_name='MutateResponse.availability_status', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='error_message', full_name='MutateResponse.error_message', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MUTATERESPONSE_MUTATERESPONSECODE,
_MUTATERESPONSE_AVAILABILITYSTATUS,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6833,
serialized_end=7271,
)
_BATCHMUTATETRACKSREQUEST = descriptor.Descriptor(
name='BatchMutateTracksRequest',
full_name='BatchMutateTracksRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='BatchMutateTracksRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track_mutation', full_name='BatchMutateTracksRequest.track_mutation', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='send_notification', full_name='BatchMutateTracksRequest.send_notification', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='detect_timestamp_conflict', full_name='BatchMutateTracksRequest.detect_timestamp_conflict', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='notify_fine_grained_updates', full_name='BatchMutateTracksRequest.notify_fine_grained_updates', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=7274,
serialized_end=7479,
)
_BATCHMUTATETRACKSRESPONSE = descriptor.Descriptor(
name='BatchMutateTracksResponse',
full_name='BatchMutateTracksResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='BatchMutateTracksResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mutate_response', full_name='BatchMutateTracksResponse.mutate_response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BATCHMUTATETRACKSRESPONSE_BATCHMUTATETRACKSRESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=7482,
serialized_end=7687,
)
_MUTATEPLAYLISTREQUEST = descriptor.Descriptor(
name='MutatePlaylistRequest',
full_name='MutatePlaylistRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='create_playlist', full_name='MutatePlaylistRequest.create_playlist', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_playlist', full_name='MutatePlaylistRequest.update_playlist', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='delete_playlist', full_name='MutatePlaylistRequest.delete_playlist', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='partial_update', full_name='MutatePlaylistRequest.partial_update', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry', full_name='MutatePlaylistRequest.playlist_entry', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_last_modified', full_name='MutatePlaylistRequest.update_last_modified', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='undelete_playlist', full_name='MutatePlaylistRequest.undelete_playlist', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=7690,
serialized_end=7937,
)
_BATCHMUTATEPLAYLISTSREQUEST = descriptor.Descriptor(
name='BatchMutatePlaylistsRequest',
full_name='BatchMutatePlaylistsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='BatchMutatePlaylistsRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_mutation', full_name='BatchMutatePlaylistsRequest.playlist_mutation', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='send_notification', full_name='BatchMutatePlaylistsRequest.send_notification', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='detect_timestamp_conflict', full_name='BatchMutatePlaylistsRequest.detect_timestamp_conflict', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='notify_fine_grained_updates', full_name='BatchMutatePlaylistsRequest.notify_fine_grained_updates', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=7940,
serialized_end=8155,
)
_BATCHMUTATEPLAYLISTSRESPONSE = descriptor.Descriptor(
name='BatchMutatePlaylistsResponse',
full_name='BatchMutatePlaylistsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='BatchMutatePlaylistsResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mutate_response', full_name='BatchMutatePlaylistsResponse.mutate_response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BATCHMUTATEPLAYLISTSRESPONSE_BATCHMUTATEPLAYLISTSRESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=8158,
serialized_end=8375,
)
_MUTATEPLAYLISTENTRYREQUEST = descriptor.Descriptor(
name='MutatePlaylistEntryRequest',
full_name='MutatePlaylistEntryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='create_playlist_entry', full_name='MutatePlaylistEntryRequest.create_playlist_entry', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_playlist_entry', full_name='MutatePlaylistEntryRequest.update_playlist_entry', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='delete_playlist_entry', full_name='MutatePlaylistEntryRequest.delete_playlist_entry', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_last_modified', full_name='MutatePlaylistEntryRequest.update_last_modified', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='undelete_playlist_entry', full_name='MutatePlaylistEntryRequest.undelete_playlist_entry', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=8378,
serialized_end=8616,
)
_BATCHMUTATEPLAYLISTENTRIESREQUEST = descriptor.Descriptor(
name='BatchMutatePlaylistEntriesRequest',
full_name='BatchMutatePlaylistEntriesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='BatchMutatePlaylistEntriesRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry_mutation', full_name='BatchMutatePlaylistEntriesRequest.playlist_entry_mutation', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='send_notification', full_name='BatchMutatePlaylistEntriesRequest.send_notification', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='detect_timestamp_conflict', full_name='BatchMutatePlaylistEntriesRequest.detect_timestamp_conflict', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='notify_fine_grained_updates', full_name='BatchMutatePlaylistEntriesRequest.notify_fine_grained_updates', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=8619,
serialized_end=8851,
)
_BATCHMUTATEPLAYLISTENTRIESRESPONSE = descriptor.Descriptor(
name='BatchMutatePlaylistEntriesResponse',
full_name='BatchMutatePlaylistEntriesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='BatchMutatePlaylistEntriesResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mutate_response', full_name='BatchMutatePlaylistEntriesResponse.mutate_response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BATCHMUTATEPLAYLISTENTRIESRESPONSE_BATCHMUTATEPLAYLISTENTRIESRESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=8854,
serialized_end=9095,
)
_MAGICPLAYLISTSEED = descriptor.Descriptor(
name='MagicPlaylistSeed',
full_name='MagicPlaylistSeed',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='seed_type', full_name='MagicPlaylistSeed.seed_type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='seed', full_name='MagicPlaylistSeed.seed', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MAGICPLAYLISTSEED_SEEDTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=9098,
serialized_end=9242,
)
_MAGICPLAYLISTREQUEST = descriptor.Descriptor(
name='MagicPlaylistRequest',
full_name='MagicPlaylistRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='MagicPlaylistRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_name', full_name='MagicPlaylistRequest.playlist_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_id', full_name='MagicPlaylistRequest.playlist_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='seed', full_name='MagicPlaylistRequest.seed', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='num_recommendations', full_name='MagicPlaylistRequest.num_recommendations', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='include_all_track_metadata', full_name='MagicPlaylistRequest.include_all_track_metadata', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='model_name', full_name='MagicPlaylistRequest.model_name', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=9245,
serialized_end=9454,
)
_MAGICPLAYLISTRESPONSE = descriptor.Descriptor(
name='MagicPlaylistResponse',
full_name='MagicPlaylistResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='playlist', full_name='MagicPlaylistResponse.playlist', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry', full_name='MagicPlaylistResponse.playlist_entry', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=9456,
serialized_end=9548,
)
_FLUSHLOCKERREQUEST = descriptor.Descriptor(
name='FlushLockerRequest',
full_name='FlushLockerRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='FlushLockerRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='gaia_cookie', full_name='FlushLockerRequest.gaia_cookie', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='remove_audio_binaries', full_name='FlushLockerRequest.remove_audio_binaries', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='remove_image_binaries', full_name='FlushLockerRequest.remove_image_binaries', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='send_notification', full_name='FlushLockerRequest.send_notification', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='reset_subscription_type', full_name='FlushLockerRequest.reset_subscription_type', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='notify_fine_grained_updates', full_name='FlushLockerRequest.notify_fine_grained_updates', index=6,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=9551,
serialized_end=9799,
)
_FLUSHLOCKERRESPONSE = descriptor.Descriptor(
name='FlushLockerResponse',
full_name='FlushLockerResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='tracks_removed', full_name='FlushLockerResponse.tracks_removed', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='entries_removed', full_name='FlushLockerResponse.entries_removed', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlists_removed', full_name='FlushLockerResponse.playlists_removed', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='success_reset_subscription_type', full_name='FlushLockerResponse.success_reset_subscription_type', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=9802,
serialized_end=9940,
)
_LOCKERNOTIFICATION = descriptor.Descriptor(
name='LockerNotification',
full_name='LockerNotification',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='LockerNotification.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='payload', full_name='LockerNotification.payload', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=9942,
serialized_end=9996,
)
# Generated protobuf descriptors for the Album messages (protoc output — do
# not edit; serialized offsets index DESCRIPTOR.serialized_pb).
# Album: aggregate metadata for an album in the user's locker.
_ALBUM = descriptor.Descriptor(
  name='Album',
  full_name='Album',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='name', full_name='Album.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album_artist', full_name='Album.album_artist', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album_art', full_name='Album.album_art', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='track_count', full_name='Album.track_count', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='last_time_played', full_name='Album.last_time_played', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='is_compilation', full_name='Album.is_compilation', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album_metajam_id', full_name='Album.album_metajam_id', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='creation_timestamp', full_name='Album.creation_timestamp', index=7,
      number=8, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='artist', full_name='Album.artist', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=9999,
  serialized_end=10213,
)
# AlbumSortOrder: sort attribute (enum, wired up after all descriptors are
# defined) plus a descending flag that explicitly defaults to False.
_ALBUMSORTORDER = descriptor.Descriptor(
  name='AlbumSortOrder',
  full_name='AlbumSortOrder',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='attribute', full_name='AlbumSortOrder.attribute', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='descending', full_name='AlbumSortOrder.descending', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _ALBUMSORTORDER_ALBUMATTRIBUTE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10216,
  serialized_end=10379,
)
# GetAlbumsRequest: required gaia_id, optional sort order and result cap.
_GETALBUMSREQUEST = descriptor.Descriptor(
  name='GetAlbumsRequest',
  full_name='GetAlbumsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetAlbumsRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sort_order', full_name='GetAlbumsRequest.sort_order', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetAlbumsRequest.max_results', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10381,
  serialized_end=10474,
)
# GetAlbumsResponse: repeated Album results.
_GETALBUMSRESPONSE = descriptor.Descriptor(
  name='GetAlbumsResponse',
  full_name='GetAlbumsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='album', full_name='GetAlbumsResponse.album', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10476,
  serialized_end=10518,
)
# Generated protobuf descriptors for the Artist messages (protoc output — do
# not edit; serialized offsets index DESCRIPTOR.serialized_pb).
# Artist: artist name, locker-wide track count, and repeated album names.
_ARTIST = descriptor.Descriptor(
  name='Artist',
  full_name='Artist',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='name', full_name='Artist.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='total_track_count', full_name='Artist.total_track_count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album', full_name='Artist.album', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10520,
  serialized_end=10592,
)
# ArtistSortOrder: only a descending flag (field number 2; no attribute
# field is declared in this message).
_ARTISTSORTORDER = descriptor.Descriptor(
  name='ArtistSortOrder',
  full_name='ArtistSortOrder',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='descending', full_name='ArtistSortOrder.descending', index=0,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10594,
  serialized_end=10638,
)
# GetArtistsRequest: required gaia_id, optional sort order and result cap.
_GETARTISTSREQUEST = descriptor.Descriptor(
  name='GetArtistsRequest',
  full_name='GetArtistsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetArtistsRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sort_order', full_name='GetArtistsRequest.sort_order', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetArtistsRequest.max_results', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10640,
  serialized_end=10735,
)
# GetArtistsResponse: repeated Artist results.
_GETARTISTSRESPONSE = descriptor.Descriptor(
  name='GetArtistsResponse',
  full_name='GetArtistsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='artist', full_name='GetArtistsResponse.artist', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10737,
  serialized_end=10782,
)
# Generated protobuf descriptors for the MusicGenre messages (protoc output —
# do not edit; serialized offsets index DESCRIPTOR.serialized_pb).
# MusicGenre: genre name, track count, and repeated album names — same shape
# as the Artist message.
_MUSICGENRE = descriptor.Descriptor(
  name='MusicGenre',
  full_name='MusicGenre',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='name', full_name='MusicGenre.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='total_track_count', full_name='MusicGenre.total_track_count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album', full_name='MusicGenre.album', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10784,
  serialized_end=10860,
)
# GenreSortOrder: only a descending flag (field number 2).
_GENRESORTORDER = descriptor.Descriptor(
  name='GenreSortOrder',
  full_name='GenreSortOrder',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='descending', full_name='GenreSortOrder.descending', index=0,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10862,
  serialized_end=10905,
)
# GetGenresRequest: required gaia_id, optional sort order and result cap.
_GETGENRESREQUEST = descriptor.Descriptor(
  name='GetGenresRequest',
  full_name='GetGenresRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetGenresRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sort_order', full_name='GetGenresRequest.sort_order', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetGenresRequest.max_results', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10907,
  serialized_end=11000,
)
# GetGenresResponse: repeated MusicGenre results.
_GETGENRESRESPONSE = descriptor.Descriptor(
  name='GetGenresResponse',
  full_name='GetGenresResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='genre', full_name='GetGenresResponse.genre', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11002,
  serialized_end=11049,
)
# Generated protobuf descriptors for the dynamic-playlist RPC pair (protoc
# output — do not edit; serialized offsets index DESCRIPTOR.serialized_pb).
# GetDynamicPlaylistEntriesRequest: paginated fetch keyed by gaia_id and a
# required playlist_entries_type enum; continuation_token resumes paging.
_GETDYNAMICPLAYLISTENTRIESREQUEST = descriptor.Descriptor(
  name='GetDynamicPlaylistEntriesRequest',
  full_name='GetDynamicPlaylistEntriesRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetDynamicPlaylistEntriesRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist_entries_type', full_name='GetDynamicPlaylistEntriesRequest.playlist_entries_type', index=1,
      number=4, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetDynamicPlaylistEntriesRequest.max_results', index=2,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetDynamicPlaylistEntriesRequest.continuation_token', index=3,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='include_all_track_metadata', full_name='GetDynamicPlaylistEntriesRequest.include_all_track_metadata', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETDYNAMICPLAYLISTENTRIESREQUEST_DYNAMICPLAYLISTENTRIESTYPE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11052,
  serialized_end=11410,
)
# GetDynamicPlaylistEntriesResponse: required response_code, repeated
# playlist entries, result-count estimate, and the next continuation token.
_GETDYNAMICPLAYLISTENTRIESRESPONSE = descriptor.Descriptor(
  name='GetDynamicPlaylistEntriesResponse',
  full_name='GetDynamicPlaylistEntriesResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='response_code', full_name='GetDynamicPlaylistEntriesResponse.response_code', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist_entry', full_name='GetDynamicPlaylistEntriesResponse.playlist_entry', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='estimated_total_results', full_name='GetDynamicPlaylistEntriesResponse.estimated_total_results', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetDynamicPlaylistEntriesResponse.continuation_token', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist_entries_type', full_name='GetDynamicPlaylistEntriesResponse.playlist_entries_type', index=4,
      number=5, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETDYNAMICPLAYLISTENTRIESRESPONSE_DYNAMICPLAYLISTENTRIESTYPE,
    _GETDYNAMICPLAYLISTENTRIESRESPONSE_RESPONSECODE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11413,
  serialized_end=11887,
)
# Generated protobuf descriptors for the aggregation RPC messages (protoc
# output — do not edit; serialized offsets index DESCRIPTOR.serialized_pb).
# GetAggregationsByTrackTypeRequest: only a required gaia_id.
_GETAGGREGATIONSBYTRACKTYPEREQUEST = descriptor.Descriptor(
  name='GetAggregationsByTrackTypeRequest',
  full_name='GetAggregationsByTrackTypeRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetAggregationsByTrackTypeRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11889,
  serialized_end=11941,
)
# TrackTypeAggregate: one (track-type enum value, count) bucket.
_TRACKTYPEAGGREGATE = descriptor.Descriptor(
  name='TrackTypeAggregate',
  full_name='TrackTypeAggregate',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='track_type_value', full_name='TrackTypeAggregate.track_type_value', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='count', full_name='TrackTypeAggregate.count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _TRACKTYPEAGGREGATE_TRACKTYPE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11944,
  serialized_end=12178,
)
# GetAggregationsByTrackTypeResponse: repeated TrackTypeAggregate buckets.
_GETAGGREGATIONSBYTRACKTYPERESPONSE = descriptor.Descriptor(
  name='GetAggregationsByTrackTypeResponse',
  full_name='GetAggregationsByTrackTypeResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='track_type_aggregate', full_name='GetAggregationsByTrackTypeResponse.track_type_aggregate', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12180,
  serialized_end=12267,
)
# GetAggregationsByAvailabilityStatusRequest: only a required gaia_id.
_GETAGGREGATIONSBYAVAILABILITYSTATUSREQUEST = descriptor.Descriptor(
  name='GetAggregationsByAvailabilityStatusRequest',
  full_name='GetAggregationsByAvailabilityStatusRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetAggregationsByAvailabilityStatusRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12269,
  serialized_end=12330,
)
# AvailabilityStatusAggregate: one (availability-status enum value, count)
# bucket.
_AVAILABILITYSTATUSAGGREGATE = descriptor.Descriptor(
  name='AvailabilityStatusAggregate',
  full_name='AvailabilityStatusAggregate',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='availability_status', full_name='AvailabilityStatusAggregate.availability_status', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='count', full_name='AvailabilityStatusAggregate.count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _AVAILABILITYSTATUSAGGREGATE_AVAILABILITYSTATUS,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12333,
  serialized_end=12592,
)
# GetAggregationsByAvailabilityStatusResponse: repeated status buckets.
_GETAGGREGATIONSBYAVAILABILITYSTATUSRESPONSE = descriptor.Descriptor(
  name='GetAggregationsByAvailabilityStatusResponse',
  full_name='GetAggregationsByAvailabilityStatusResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='availability_status_aggregate', full_name='GetAggregationsByAvailabilityStatusResponse.availability_status_aggregate', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12594,
  serialized_end=12708,
)
# Generated protobuf descriptors for the promo-track RPC pair (protoc output —
# do not edit; serialized offsets index DESCRIPTOR.serialized_pb).
# AddPromoTracksRequest: required gaia_id plus repeated genre strings.
_ADDPROMOTRACKSREQUEST = descriptor.Descriptor(
  name='AddPromoTracksRequest',
  full_name='AddPromoTracksRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='AddPromoTracksRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='genre', full_name='AddPromoTracksRequest.genre', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12710,
  serialized_end=12765,
)
# AddPromoTracksResponse: repeated Track results.
_ADDPROMOTRACKSRESPONSE = descriptor.Descriptor(
  name='AddPromoTracksResponse',
  full_name='AddPromoTracksResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='track', full_name='AddPromoTracksResponse.track', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12767,
  serialized_end=12814,
)
# Generated protobuf descriptors for the playlist-aggregation RPC messages
# (protoc output — do not edit; offsets index DESCRIPTOR.serialized_pb).
# GetPlaylistAggregationsRequest: required gaia_id; max_results carries an
# explicit proto default of 14.
_GETPLAYLISTAGGREGATIONSREQUEST = descriptor.Descriptor(
  name='GetPlaylistAggregationsRequest',
  full_name='GetPlaylistAggregationsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetPlaylistAggregationsRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetPlaylistAggregationsRequest.max_results', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=14,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12816,
  serialized_end=12890,
)
# PlaylistAggregate: summary row for one playlist (id, name, art, counts).
_PLAYLISTAGGREGATE = descriptor.Descriptor(
  name='PlaylistAggregate',
  full_name='PlaylistAggregate',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='playlist_id', full_name='PlaylistAggregate.playlist_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='name', full_name='PlaylistAggregate.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album_art', full_name='PlaylistAggregate.album_art', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='track_count', full_name='PlaylistAggregate.track_count', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='last_time_played', full_name='PlaylistAggregate.last_time_played', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12893,
  serialized_end=13024,
)
# GetPlaylistAggregationsResponse: repeated PlaylistAggregate rows.
_GETPLAYLISTAGGREGATIONSRESPONSE = descriptor.Descriptor(
  name='GetPlaylistAggregationsResponse',
  full_name='GetPlaylistAggregationsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='playlist_aggregate', full_name='GetPlaylistAggregationsResponse.playlist_aggregate', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=13026,
  serialized_end=13107,
)
# Generated protobuf descriptors for the remote-control RPC pair (protoc
# output — do not edit; serialized offsets index DESCRIPTOR.serialized_pb).
# RemoteControlCommandRequest: optional gaia_id plus a free-form command
# string.
_REMOTECONTROLCOMMANDREQUEST = descriptor.Descriptor(
  name='RemoteControlCommandRequest',
  full_name='RemoteControlCommandRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='RemoteControlCommandRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='command', full_name='RemoteControlCommandRequest.command', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=13109,
  serialized_end=13172,
)
# RemoteControlCommandResponse: a single response_code enum field.
_REMOTECONTROLCOMMANDRESPONSE = descriptor.Descriptor(
  name='RemoteControlCommandResponse',
  full_name='RemoteControlCommandResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='response_code', full_name='RemoteControlCommandResponse.response_code', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _REMOTECONTROLCOMMANDRESPONSE_RESPONSECODE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=13175,
  serialized_end=13354,
)
# Generated cross-reference wiring (protoc output — do not edit). Descriptors
# are constructed above with message_type/enum_type left as None because of
# forward references; these statements patch in the real descriptor objects
# and set each nested enum's containing_type back-reference. (This run of
# assignments continues beyond this point in the generated file.)
_AUDIOREF.fields_by_name['store'].enum_type = _AUDIOREF_STORE
_AUDIOREF_STORE.containing_type = _AUDIOREF;
_IMAGEREF.fields_by_name['store'].enum_type = _IMAGEREF_STORE
_IMAGEREF.fields_by_name['origin'].enum_type = _IMAGEREF_ORIGIN
_IMAGEREF_STORE.containing_type = _IMAGEREF;
_IMAGEREF_ORIGIN.containing_type = _IMAGEREF;
_TRACK.fields_by_name['audio_ref'].message_type = _AUDIOREF
_TRACK.fields_by_name['album_art_ref'].message_type = _IMAGEREF
_TRACK.fields_by_name['availability_status'].enum_type = _TRACK_AVAILABILITYSTATUS
_TRACK.fields_by_name['content_type'].enum_type = _TRACK_CONTENTTYPE
_TRACK.fields_by_name['channels'].enum_type = _TRACK_CHANNELS
_TRACK.fields_by_name['track_type'].enum_type = _TRACK_TRACKTYPE
_TRACK.fields_by_name['rating'].enum_type = _TRACK_RATING
# uits_metadata is the one cross-file reference: its type lives in uits_pb2.
_TRACK.fields_by_name['uits_metadata'].message_type = uits_pb2._UITSMETADATA
_TRACK.fields_by_name['original_content_type'].enum_type = _TRACK_CONTENTTYPE
_TRACK.fields_by_name['uploaded_uits'].message_type = _UPLOADEDUITSID3TAG
_TRACK_AVAILABILITYSTATUS.containing_type = _TRACK;
_TRACK_CONTENTTYPE.containing_type = _TRACK;
_TRACK_CHANNELS.containing_type = _TRACK;
_TRACK_TRACKTYPE.containing_type = _TRACK;
_TRACK_RATING.containing_type = _TRACK;
_TRACKS.fields_by_name['track'].message_type = _TRACK
_PLAYLIST.fields_by_name['playlist_type'].enum_type = _PLAYLIST_PLAYLISTTYPE
_PLAYLIST.fields_by_name['playlist_art_ref'].message_type = _IMAGEREF
_PLAYLIST_PLAYLISTTYPE.containing_type = _PLAYLIST;
_PLAYLISTENTRY.fields_by_name['relative_position_id_type'].enum_type = _PLAYLISTENTRY_RELATIVEPOSITIONIDTYPE
_PLAYLISTENTRY.fields_by_name['track'].message_type = _TRACK
_PLAYLISTENTRY_RELATIVEPOSITIONIDTYPE.containing_type = _PLAYLISTENTRY;
_TRACKSEARCHRESTRICTION.fields_by_name['attribute'].enum_type = _TRACKSEARCHRESTRICTION_TRACKATTRIBUTE
_TRACKSEARCHRESTRICTION.fields_by_name['comparison_type'].enum_type = _TRACKSEARCHRESTRICTION_COMPARISONTYPE
_TRACKSEARCHRESTRICTION_TRACKATTRIBUTE.containing_type = _TRACKSEARCHRESTRICTION;
_TRACKSEARCHRESTRICTION_COMPARISONTYPE.containing_type = _TRACKSEARCHRESTRICTION;
_TRACKSEARCHRESTRICTIONSET.fields_by_name['type'].enum_type = _TRACKSEARCHRESTRICTIONSET_RESTRICTIONSETTYPE
_TRACKSEARCHRESTRICTIONSET.fields_by_name['restriction'].message_type = _TRACKSEARCHRESTRICTION
# sub_set is self-referential: a restriction set may nest restriction sets.
_TRACKSEARCHRESTRICTIONSET.fields_by_name['sub_set'].message_type = _TRACKSEARCHRESTRICTIONSET
_TRACKSEARCHRESTRICTIONSET_RESTRICTIONSETTYPE.containing_type = _TRACKSEARCHRESTRICTIONSET;
_TRACKSORTORDER.fields_by_name['attribute'].enum_type = _TRACKSORTORDER_TRACKATTRIBUTE
_TRACKSORTORDER_TRACKATTRIBUTE.containing_type = _TRACKSORTORDER;
_GETTRACKSREQUEST.fields_by_name['search_restriction'].message_type = _TRACKSEARCHRESTRICTION
_GETTRACKSREQUEST.fields_by_name['sort_order'].message_type = _TRACKSORTORDER
_GETTRACKSREQUEST.fields_by_name['restriction_set'].message_type = _TRACKSEARCHRESTRICTIONSET
_GETTRACKSREQUEST.fields_by_name['track_projection'].enum_type = _GETTRACKSREQUEST_TRACKPROJECTION
_GETTRACKSREQUEST_TRACKPROJECTION.containing_type = _GETTRACKSREQUEST;
_GETTRACKSRESPONSE.fields_by_name['response_code'].enum_type = _GETTRACKSRESPONSE_RESPONSECODE
_GETTRACKSRESPONSE.fields_by_name['track'].message_type = _TRACK
_GETTRACKSRESPONSE_RESPONSECODE.containing_type = _GETTRACKSRESPONSE;
_GETPLAYLISTENTRIESRESPONSE.fields_by_name['response_code'].enum_type = _GETPLAYLISTENTRIESRESPONSE_RESPONSECODE
_GETPLAYLISTENTRIESRESPONSE.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_GETPLAYLISTENTRIESRESPONSE_RESPONSECODE.containing_type = _GETPLAYLISTENTRIESRESPONSE;
_PLAYLISTSORTORDER.fields_by_name['attribute'].enum_type = _PLAYLISTSORTORDER_PLAYLISTATTRIBUTE
_PLAYLISTSORTORDER_PLAYLISTATTRIBUTE.containing_type = _PLAYLISTSORTORDER;
_GETPLAYLISTSREQUEST.fields_by_name['sort_order'].message_type = _PLAYLISTSORTORDER
_GETPLAYLISTSRESPONSE.fields_by_name['response_code'].enum_type = _GETPLAYLISTSRESPONSE_RESPONSECODE
_GETPLAYLISTSRESPONSE.fields_by_name['playlist'].message_type = _PLAYLIST
_GETPLAYLISTSRESPONSE_RESPONSECODE.containing_type = _GETPLAYLISTSRESPONSE;
_BATCHLOOKUPREQUEST.fields_by_name['track'].message_type = _LOOKUPTRACKREQUEST
_BATCHLOOKUPREQUEST.fields_by_name['playlist'].message_type = _LOOKUPPLAYLISTREQUEST
_BATCHLOOKUPREQUEST.fields_by_name['metadata_type'].enum_type = _BATCHLOOKUPREQUEST_METADATATYPE
_BATCHLOOKUPREQUEST.fields_by_name['playlist_entry'].message_type = _LOOKUPPLAYLISTENTRYREQUEST
_BATCHLOOKUPREQUEST_METADATATYPE.containing_type = _BATCHLOOKUPREQUEST;
_BATCHLOOKUPRESPONSE.fields_by_name['track'].message_type = _TRACK
_BATCHLOOKUPRESPONSE.fields_by_name['playlist'].message_type = _PLAYLIST
_BATCHLOOKUPRESPONSE.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_MUTATETRACKREQUEST.fields_by_name['create_track'].message_type = _TRACK
_MUTATETRACKREQUEST.fields_by_name['update_track'].message_type = _TRACK
_MUTATERESPONSE.fields_by_name['response_code'].enum_type = _MUTATERESPONSE_MUTATERESPONSECODE
_MUTATERESPONSE.fields_by_name['availability_status'].enum_type = _MUTATERESPONSE_AVAILABILITYSTATUS
_MUTATERESPONSE_MUTATERESPONSECODE.containing_type = _MUTATERESPONSE;
_MUTATERESPONSE_AVAILABILITYSTATUS.containing_type = _MUTATERESPONSE;
_BATCHMUTATETRACKSREQUEST.fields_by_name['track_mutation'].message_type = _MUTATETRACKREQUEST
_BATCHMUTATETRACKSRESPONSE.fields_by_name['response_code'].enum_type = _BATCHMUTATETRACKSRESPONSE_BATCHMUTATETRACKSRESPONSECODE
_BATCHMUTATETRACKSRESPONSE.fields_by_name['mutate_response'].message_type = _MUTATERESPONSE
_BATCHMUTATETRACKSRESPONSE_BATCHMUTATETRACKSRESPONSECODE.containing_type = _BATCHMUTATETRACKSRESPONSE;
_MUTATEPLAYLISTREQUEST.fields_by_name['create_playlist'].message_type = _PLAYLIST
_MUTATEPLAYLISTREQUEST.fields_by_name['update_playlist'].message_type = _PLAYLIST
_MUTATEPLAYLISTREQUEST.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_BATCHMUTATEPLAYLISTSREQUEST.fields_by_name['playlist_mutation'].message_type = _MUTATEPLAYLISTREQUEST
_BATCHMUTATEPLAYLISTSRESPONSE.fields_by_name['response_code'].enum_type = _BATCHMUTATEPLAYLISTSRESPONSE_BATCHMUTATEPLAYLISTSRESPONSECODE
_BATCHMUTATEPLAYLISTSRESPONSE.fields_by_name['mutate_response'].message_type = _MUTATERESPONSE
_BATCHMUTATEPLAYLISTSRESPONSE_BATCHMUTATEPLAYLISTSRESPONSECODE.containing_type = _BATCHMUTATEPLAYLISTSRESPONSE;
_MUTATEPLAYLISTENTRYREQUEST.fields_by_name['create_playlist_entry'].message_type = _PLAYLISTENTRY
_MUTATEPLAYLISTENTRYREQUEST.fields_by_name['update_playlist_entry'].message_type = _PLAYLISTENTRY
_MUTATEPLAYLISTENTRYREQUEST.fields_by_name['delete_playlist_entry'].message_type = _PLAYLISTENTRY
_BATCHMUTATEPLAYLISTENTRIESREQUEST.fields_by_name['playlist_entry_mutation'].message_type = _MUTATEPLAYLISTENTRYREQUEST
_BATCHMUTATEPLAYLISTENTRIESRESPONSE.fields_by_name['response_code'].enum_type = _BATCHMUTATEPLAYLISTENTRIESRESPONSE_BATCHMUTATEPLAYLISTENTRIESRESPONSECODE
_BATCHMUTATEPLAYLISTENTRIESRESPONSE.fields_by_name['mutate_response'].message_type = _MUTATERESPONSE
_BATCHMUTATEPLAYLISTENTRIESRESPONSE_BATCHMUTATEPLAYLISTENTRIESRESPONSECODE.containing_type = _BATCHMUTATEPLAYLISTENTRIESRESPONSE;
_MAGICPLAYLISTSEED.fields_by_name['seed_type'].enum_type = _MAGICPLAYLISTSEED_SEEDTYPE
_MAGICPLAYLISTSEED_SEEDTYPE.containing_type = _MAGICPLAYLISTSEED;
_MAGICPLAYLISTREQUEST.fields_by_name['seed'].message_type = _MAGICPLAYLISTSEED
_MAGICPLAYLISTRESPONSE.fields_by_name['playlist'].message_type = _PLAYLIST
_MAGICPLAYLISTRESPONSE.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_ALBUM.fields_by_name['album_art'].message_type = _IMAGEREF
_ALBUMSORTORDER.fields_by_name['attribute'].enum_type = _ALBUMSORTORDER_ALBUMATTRIBUTE
_ALBUMSORTORDER_ALBUMATTRIBUTE.containing_type = _ALBUMSORTORDER;
_GETALBUMSREQUEST.fields_by_name['sort_order'].message_type = _ALBUMSORTORDER
_GETALBUMSRESPONSE.fields_by_name['album'].message_type = _ALBUM
_ARTIST.fields_by_name['album'].message_type = _ALBUM
_GETARTISTSREQUEST.fields_by_name['sort_order'].message_type = _ARTISTSORTORDER
_GETARTISTSRESPONSE.fields_by_name['artist'].message_type = _ARTIST
_MUSICGENRE.fields_by_name['album'].message_type = _ALBUM
_GETGENRESREQUEST.fields_by_name['sort_order'].message_type = _GENRESORTORDER
_GETGENRESRESPONSE.fields_by_name['genre'].message_type = _MUSICGENRE
_GETDYNAMICPLAYLISTENTRIESREQUEST.fields_by_name['playlist_entries_type'].enum_type = _GETDYNAMICPLAYLISTENTRIESREQUEST_DYNAMICPLAYLISTENTRIESTYPE
_GETDYNAMICPLAYLISTENTRIESREQUEST_DYNAMICPLAYLISTENTRIESTYPE.containing_type = _GETDYNAMICPLAYLISTENTRIESREQUEST;
_GETDYNAMICPLAYLISTENTRIESRESPONSE.fields_by_name['response_code'].enum_type = _GETDYNAMICPLAYLISTENTRIESRESPONSE_RESPONSECODE
_GETDYNAMICPLAYLISTENTRIESRESPONSE.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_GETDYNAMICPLAYLISTENTRIESRESPONSE.fields_by_name['playlist_entries_type'].enum_type = _GETDYNAMICPLAYLISTENTRIESRESPONSE_DYNAMICPLAYLISTENTRIESTYPE
_GETDYNAMICPLAYLISTENTRIESRESPONSE_DYNAMICPLAYLISTENTRIESTYPE.containing_type = _GETDYNAMICPLAYLISTENTRIESRESPONSE;
_GETDYNAMICPLAYLISTENTRIESRESPONSE_RESPONSECODE.containing_type = _GETDYNAMICPLAYLISTENTRIESRESPONSE;
_TRACKTYPEAGGREGATE.fields_by_name['track_type_value'].enum_type = _TRACKTYPEAGGREGATE_TRACKTYPE
_TRACKTYPEAGGREGATE_TRACKTYPE.containing_type = _TRACKTYPEAGGREGATE;
_GETAGGREGATIONSBYTRACKTYPERESPONSE.fields_by_name['track_type_aggregate'].message_type = _TRACKTYPEAGGREGATE
_AVAILABILITYSTATUSAGGREGATE.fields_by_name['availability_status'].enum_type = _AVAILABILITYSTATUSAGGREGATE_AVAILABILITYSTATUS
_AVAILABILITYSTATUSAGGREGATE_AVAILABILITYSTATUS.containing_type = _AVAILABILITYSTATUSAGGREGATE;
_GETAGGREGATIONSBYAVAILABILITYSTATUSRESPONSE.fields_by_name['availability_status_aggregate'].message_type = _AVAILABILITYSTATUSAGGREGATE
_ADDPROMOTRACKSRESPONSE.fields_by_name['track'].message_type = _TRACK
_PLAYLISTAGGREGATE.fields_by_name['album_art'].message_type = _IMAGEREF
_GETPLAYLISTAGGREGATIONSRESPONSE.fields_by_name['playlist_aggregate'].message_type = _PLAYLISTAGGREGATE
_REMOTECONTROLCOMMANDRESPONSE.fields_by_name['response_code'].enum_type = _REMOTECONTROLCOMMANDRESPONSE_RESPONSECODE
_REMOTECONTROLCOMMANDRESPONSE_RESPONSECODE.containing_type = _REMOTECONTROLCOMMANDRESPONSE;
DESCRIPTOR.message_types_by_name['AudioRef'] = _AUDIOREF
DESCRIPTOR.message_types_by_name['ImageRef'] = _IMAGEREF
DESCRIPTOR.message_types_by_name['UploadedUitsId3Tag'] = _UPLOADEDUITSID3TAG
DESCRIPTOR.message_types_by_name['Track'] = _TRACK
DESCRIPTOR.message_types_by_name['Tracks'] = _TRACKS
DESCRIPTOR.message_types_by_name['Playlist'] = _PLAYLIST
DESCRIPTOR.message_types_by_name['PlaylistEntry'] = _PLAYLISTENTRY
DESCRIPTOR.message_types_by_name['TrackSearchRestriction'] = _TRACKSEARCHRESTRICTION
DESCRIPTOR.message_types_by_name['TrackSearchRestrictionSet'] = _TRACKSEARCHRESTRICTIONSET
DESCRIPTOR.message_types_by_name['TrackSortOrder'] = _TRACKSORTORDER
DESCRIPTOR.message_types_by_name['GetTracksRequest'] = _GETTRACKSREQUEST
DESCRIPTOR.message_types_by_name['GetTracksResponse'] = _GETTRACKSRESPONSE
DESCRIPTOR.message_types_by_name['GetPlaylistEntriesRequest'] = _GETPLAYLISTENTRIESREQUEST
DESCRIPTOR.message_types_by_name['GetPlaylistEntriesResponse'] = _GETPLAYLISTENTRIESRESPONSE
DESCRIPTOR.message_types_by_name['PlaylistSortOrder'] = _PLAYLISTSORTORDER
DESCRIPTOR.message_types_by_name['GetPlaylistsRequest'] = _GETPLAYLISTSREQUEST
DESCRIPTOR.message_types_by_name['GetPlaylistsResponse'] = _GETPLAYLISTSRESPONSE
DESCRIPTOR.message_types_by_name['LookupTrackRequest'] = _LOOKUPTRACKREQUEST
DESCRIPTOR.message_types_by_name['LookupPlaylistEntryRequest'] = _LOOKUPPLAYLISTENTRYREQUEST
DESCRIPTOR.message_types_by_name['LookupPlaylistRequest'] = _LOOKUPPLAYLISTREQUEST
DESCRIPTOR.message_types_by_name['BatchLookupRequest'] = _BATCHLOOKUPREQUEST
DESCRIPTOR.message_types_by_name['BatchLookupResponse'] = _BATCHLOOKUPRESPONSE
DESCRIPTOR.message_types_by_name['MutateTrackRequest'] = _MUTATETRACKREQUEST
DESCRIPTOR.message_types_by_name['MutateResponse'] = _MUTATERESPONSE
DESCRIPTOR.message_types_by_name['BatchMutateTracksRequest'] = _BATCHMUTATETRACKSREQUEST
DESCRIPTOR.message_types_by_name['BatchMutateTracksResponse'] = _BATCHMUTATETRACKSRESPONSE
DESCRIPTOR.message_types_by_name['MutatePlaylistRequest'] = _MUTATEPLAYLISTREQUEST
DESCRIPTOR.message_types_by_name['BatchMutatePlaylistsRequest'] = _BATCHMUTATEPLAYLISTSREQUEST
DESCRIPTOR.message_types_by_name['BatchMutatePlaylistsResponse'] = _BATCHMUTATEPLAYLISTSRESPONSE
DESCRIPTOR.message_types_by_name['MutatePlaylistEntryRequest'] = _MUTATEPLAYLISTENTRYREQUEST
DESCRIPTOR.message_types_by_name['BatchMutatePlaylistEntriesRequest'] = _BATCHMUTATEPLAYLISTENTRIESREQUEST
DESCRIPTOR.message_types_by_name['BatchMutatePlaylistEntriesResponse'] = _BATCHMUTATEPLAYLISTENTRIESRESPONSE
DESCRIPTOR.message_types_by_name['MagicPlaylistSeed'] = _MAGICPLAYLISTSEED
DESCRIPTOR.message_types_by_name['MagicPlaylistRequest'] = _MAGICPLAYLISTREQUEST
DESCRIPTOR.message_types_by_name['MagicPlaylistResponse'] = _MAGICPLAYLISTRESPONSE
DESCRIPTOR.message_types_by_name['FlushLockerRequest'] = _FLUSHLOCKERREQUEST
DESCRIPTOR.message_types_by_name['FlushLockerResponse'] = _FLUSHLOCKERRESPONSE
DESCRIPTOR.message_types_by_name['LockerNotification'] = _LOCKERNOTIFICATION
DESCRIPTOR.message_types_by_name['Album'] = _ALBUM
DESCRIPTOR.message_types_by_name['AlbumSortOrder'] = _ALBUMSORTORDER
DESCRIPTOR.message_types_by_name['GetAlbumsRequest'] = _GETALBUMSREQUEST
DESCRIPTOR.message_types_by_name['GetAlbumsResponse'] = _GETALBUMSRESPONSE
DESCRIPTOR.message_types_by_name['Artist'] = _ARTIST
DESCRIPTOR.message_types_by_name['ArtistSortOrder'] = _ARTISTSORTORDER
DESCRIPTOR.message_types_by_name['GetArtistsRequest'] = _GETARTISTSREQUEST
DESCRIPTOR.message_types_by_name['GetArtistsResponse'] = _GETARTISTSRESPONSE
DESCRIPTOR.message_types_by_name['MusicGenre'] = _MUSICGENRE
DESCRIPTOR.message_types_by_name['GenreSortOrder'] = _GENRESORTORDER
DESCRIPTOR.message_types_by_name['GetGenresRequest'] = _GETGENRESREQUEST
DESCRIPTOR.message_types_by_name['GetGenresResponse'] = _GETGENRESRESPONSE
DESCRIPTOR.message_types_by_name['GetDynamicPlaylistEntriesRequest'] = _GETDYNAMICPLAYLISTENTRIESREQUEST
DESCRIPTOR.message_types_by_name['GetDynamicPlaylistEntriesResponse'] = _GETDYNAMICPLAYLISTENTRIESRESPONSE
DESCRIPTOR.message_types_by_name['GetAggregationsByTrackTypeRequest'] = _GETAGGREGATIONSBYTRACKTYPEREQUEST
DESCRIPTOR.message_types_by_name['TrackTypeAggregate'] = _TRACKTYPEAGGREGATE
DESCRIPTOR.message_types_by_name['GetAggregationsByTrackTypeResponse'] = _GETAGGREGATIONSBYTRACKTYPERESPONSE
DESCRIPTOR.message_types_by_name['GetAggregationsByAvailabilityStatusRequest'] = _GETAGGREGATIONSBYAVAILABILITYSTATUSREQUEST
DESCRIPTOR.message_types_by_name['AvailabilityStatusAggregate'] = _AVAILABILITYSTATUSAGGREGATE
DESCRIPTOR.message_types_by_name['GetAggregationsByAvailabilityStatusResponse'] = _GETAGGREGATIONSBYAVAILABILITYSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['AddPromoTracksRequest'] = _ADDPROMOTRACKSREQUEST
DESCRIPTOR.message_types_by_name['AddPromoTracksResponse'] = _ADDPROMOTRACKSRESPONSE
DESCRIPTOR.message_types_by_name['GetPlaylistAggregationsRequest'] = _GETPLAYLISTAGGREGATIONSREQUEST
DESCRIPTOR.message_types_by_name['PlaylistAggregate'] = _PLAYLISTAGGREGATE
DESCRIPTOR.message_types_by_name['GetPlaylistAggregationsResponse'] = _GETPLAYLISTAGGREGATIONSRESPONSE
DESCRIPTOR.message_types_by_name['RemoteControlCommandRequest'] = _REMOTECONTROLCOMMANDREQUEST
DESCRIPTOR.message_types_by_name['RemoteControlCommandResponse'] = _REMOTECONTROLCOMMANDRESPONSE
class AudioRef(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _AUDIOREF
# @@protoc_insertion_point(class_scope:AudioRef)
class ImageRef(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _IMAGEREF
# @@protoc_insertion_point(class_scope:ImageRef)
class UploadedUitsId3Tag(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _UPLOADEDUITSID3TAG
# @@protoc_insertion_point(class_scope:UploadedUitsId3Tag)
class Track(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACK
# @@protoc_insertion_point(class_scope:Track)
class Tracks(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKS
# @@protoc_insertion_point(class_scope:Tracks)
class Playlist(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLAYLIST
# @@protoc_insertion_point(class_scope:Playlist)
class PlaylistEntry(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLAYLISTENTRY
# @@protoc_insertion_point(class_scope:PlaylistEntry)
class TrackSearchRestriction(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKSEARCHRESTRICTION
# @@protoc_insertion_point(class_scope:TrackSearchRestriction)
class TrackSearchRestrictionSet(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKSEARCHRESTRICTIONSET
# @@protoc_insertion_point(class_scope:TrackSearchRestrictionSet)
class TrackSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKSORTORDER
# @@protoc_insertion_point(class_scope:TrackSortOrder)
class GetTracksRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETTRACKSREQUEST
# @@protoc_insertion_point(class_scope:GetTracksRequest)
class GetTracksResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETTRACKSRESPONSE
# @@protoc_insertion_point(class_scope:GetTracksResponse)
class GetPlaylistEntriesRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTENTRIESREQUEST
# @@protoc_insertion_point(class_scope:GetPlaylistEntriesRequest)
class GetPlaylistEntriesResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTENTRIESRESPONSE
# @@protoc_insertion_point(class_scope:GetPlaylistEntriesResponse)
class PlaylistSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLAYLISTSORTORDER
# @@protoc_insertion_point(class_scope:PlaylistSortOrder)
class GetPlaylistsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTSREQUEST
# @@protoc_insertion_point(class_scope:GetPlaylistsRequest)
class GetPlaylistsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTSRESPONSE
# @@protoc_insertion_point(class_scope:GetPlaylistsResponse)
class LookupTrackRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOOKUPTRACKREQUEST
# @@protoc_insertion_point(class_scope:LookupTrackRequest)
class LookupPlaylistEntryRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOOKUPPLAYLISTENTRYREQUEST
# @@protoc_insertion_point(class_scope:LookupPlaylistEntryRequest)
class LookupPlaylistRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOOKUPPLAYLISTREQUEST
# @@protoc_insertion_point(class_scope:LookupPlaylistRequest)
class BatchLookupRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHLOOKUPREQUEST
# @@protoc_insertion_point(class_scope:BatchLookupRequest)
class BatchLookupResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHLOOKUPRESPONSE
# @@protoc_insertion_point(class_scope:BatchLookupResponse)
class MutateTrackRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUTATETRACKREQUEST
# @@protoc_insertion_point(class_scope:MutateTrackRequest)
class MutateResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUTATERESPONSE
# @@protoc_insertion_point(class_scope:MutateResponse)
class BatchMutateTracksRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATETRACKSREQUEST
# @@protoc_insertion_point(class_scope:BatchMutateTracksRequest)
class BatchMutateTracksResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATETRACKSRESPONSE
# @@protoc_insertion_point(class_scope:BatchMutateTracksResponse)
class MutatePlaylistRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUTATEPLAYLISTREQUEST
# @@protoc_insertion_point(class_scope:MutatePlaylistRequest)
class BatchMutatePlaylistsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATEPLAYLISTSREQUEST
# @@protoc_insertion_point(class_scope:BatchMutatePlaylistsRequest)
class BatchMutatePlaylistsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATEPLAYLISTSRESPONSE
# @@protoc_insertion_point(class_scope:BatchMutatePlaylistsResponse)
class MutatePlaylistEntryRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUTATEPLAYLISTENTRYREQUEST
# @@protoc_insertion_point(class_scope:MutatePlaylistEntryRequest)
class BatchMutatePlaylistEntriesRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATEPLAYLISTENTRIESREQUEST
# @@protoc_insertion_point(class_scope:BatchMutatePlaylistEntriesRequest)
class BatchMutatePlaylistEntriesResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATEPLAYLISTENTRIESRESPONSE
# @@protoc_insertion_point(class_scope:BatchMutatePlaylistEntriesResponse)
class MagicPlaylistSeed(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MAGICPLAYLISTSEED
# @@protoc_insertion_point(class_scope:MagicPlaylistSeed)
class MagicPlaylistRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MAGICPLAYLISTREQUEST
# @@protoc_insertion_point(class_scope:MagicPlaylistRequest)
class MagicPlaylistResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MAGICPLAYLISTRESPONSE
# @@protoc_insertion_point(class_scope:MagicPlaylistResponse)
class FlushLockerRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FLUSHLOCKERREQUEST
# @@protoc_insertion_point(class_scope:FlushLockerRequest)
class FlushLockerResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FLUSHLOCKERRESPONSE
# @@protoc_insertion_point(class_scope:FlushLockerResponse)
class LockerNotification(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOCKERNOTIFICATION
# @@protoc_insertion_point(class_scope:LockerNotification)
class Album(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ALBUM
# @@protoc_insertion_point(class_scope:Album)
class AlbumSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ALBUMSORTORDER
# @@protoc_insertion_point(class_scope:AlbumSortOrder)
class GetAlbumsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETALBUMSREQUEST
# @@protoc_insertion_point(class_scope:GetAlbumsRequest)
class GetAlbumsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETALBUMSRESPONSE
# @@protoc_insertion_point(class_scope:GetAlbumsResponse)
class Artist(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ARTIST
# @@protoc_insertion_point(class_scope:Artist)
class ArtistSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ARTISTSORTORDER
# @@protoc_insertion_point(class_scope:ArtistSortOrder)
class GetArtistsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETARTISTSREQUEST
# @@protoc_insertion_point(class_scope:GetArtistsRequest)
class GetArtistsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETARTISTSRESPONSE
# @@protoc_insertion_point(class_scope:GetArtistsResponse)
class MusicGenre(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUSICGENRE
# @@protoc_insertion_point(class_scope:MusicGenre)
class GenreSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GENRESORTORDER
# @@protoc_insertion_point(class_scope:GenreSortOrder)
class GetGenresRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETGENRESREQUEST
# @@protoc_insertion_point(class_scope:GetGenresRequest)
class GetGenresResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETGENRESRESPONSE
# @@protoc_insertion_point(class_scope:GetGenresResponse)
class GetDynamicPlaylistEntriesRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETDYNAMICPLAYLISTENTRIESREQUEST
# @@protoc_insertion_point(class_scope:GetDynamicPlaylistEntriesRequest)
class GetDynamicPlaylistEntriesResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETDYNAMICPLAYLISTENTRIESRESPONSE
# @@protoc_insertion_point(class_scope:GetDynamicPlaylistEntriesResponse)
class GetAggregationsByTrackTypeRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETAGGREGATIONSBYTRACKTYPEREQUEST
# @@protoc_insertion_point(class_scope:GetAggregationsByTrackTypeRequest)
class TrackTypeAggregate(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKTYPEAGGREGATE
# @@protoc_insertion_point(class_scope:TrackTypeAggregate)
class GetAggregationsByTrackTypeResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETAGGREGATIONSBYTRACKTYPERESPONSE
# @@protoc_insertion_point(class_scope:GetAggregationsByTrackTypeResponse)
class GetAggregationsByAvailabilityStatusRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETAGGREGATIONSBYAVAILABILITYSTATUSREQUEST
# @@protoc_insertion_point(class_scope:GetAggregationsByAvailabilityStatusRequest)
class AvailabilityStatusAggregate(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _AVAILABILITYSTATUSAGGREGATE
# @@protoc_insertion_point(class_scope:AvailabilityStatusAggregate)
class GetAggregationsByAvailabilityStatusResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETAGGREGATIONSBYAVAILABILITYSTATUSRESPONSE
# @@protoc_insertion_point(class_scope:GetAggregationsByAvailabilityStatusResponse)
class AddPromoTracksRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ADDPROMOTRACKSREQUEST
# @@protoc_insertion_point(class_scope:AddPromoTracksRequest)
class AddPromoTracksResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ADDPROMOTRACKSRESPONSE
# @@protoc_insertion_point(class_scope:AddPromoTracksResponse)
class GetPlaylistAggregationsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTAGGREGATIONSREQUEST
# @@protoc_insertion_point(class_scope:GetPlaylistAggregationsRequest)
class PlaylistAggregate(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLAYLISTAGGREGATE
# @@protoc_insertion_point(class_scope:PlaylistAggregate)
class GetPlaylistAggregationsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTAGGREGATIONSRESPONSE
# @@protoc_insertion_point(class_scope:GetPlaylistAggregationsResponse)
class RemoteControlCommandRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOTECONTROLCOMMANDREQUEST
# @@protoc_insertion_point(class_scope:RemoteControlCommandRequest)
class RemoteControlCommandResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOTECONTROLCOMMANDRESPONSE
# @@protoc_insertion_point(class_scope:RemoteControlCommandResponse)
# @@protoc_insertion_point(module_scope)
| 40.620848 | 23,106 | 0.745915 |
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
import uits_pb2
DESCRIPTOR = descriptor.FileDescriptor(
name='locker.proto',
package='',
serialized_pb='\n\x0clocker.proto\x1a\nuits.proto\"\xf8\x01\n\x08\x41udioRef\x12\x1e\n\x05store\x18\x01 \x02(\x0e\x32\x0f.AudioRef.Store\x12\x0b\n\x03ref\x18\x02 \x02(\x0c\x12\x0b\n\x03url\x18\x04 \x01(\t\x12\x10\n\x08\x62it_rate\x18\x05 \x01(\x05\x12\x13\n\x0bsample_rate\x18\x06 \x01(\x05\x12\x14\n\x0c\x64ownloadable\x18\x07 \x01(\x08\x12\x17\n\x0f\x64uration_millis\x18\x08 \x01(\x03\x12\x19\n\x11rematch_timestamp\x18\t \x01(\x03\x12\x1e\n\x16invalid_due_to_wipeout\x18\n \x01(\x08\"!\n\x05Store\x12\r\n\tBLOBSTORE\x10\x01\x12\t\n\x05SM_V2\x10\x02\"\xd1\x01\n\x08ImageRef\x12\x1e\n\x05store\x18\x01 \x01(\x0e\x32\x0f.ImageRef.Store\x12\r\n\x05width\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x0b\n\x03url\x18\x06 \x01(\t\x12\x1e\n\x16invalid_due_to_wipeout\x18\x07 \x01(\x08\x12 \n\x06origin\x18\x08 \x01(\x0e\x32\x10.ImageRef.Origin\"\x14\n\x05Store\x12\x0b\n\x07SHOEBOX\x10\x03\"!\n\x06Origin\x12\x0c\n\x08PERSONAL\x10\x01\x12\t\n\x05STORE\x10\x02\"1\n\x12UploadedUitsId3Tag\x12\r\n\x05owner\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x8c\x10\n\x05Track\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tclient_id\x18\x02 \x01(\t\x12\x1a\n\x12\x63reation_timestamp\x18\x03 \x01(\x03\x12\x1f\n\x17last_modified_timestamp\x18\x04 \x01(\x03\x12\x16\n\x07\x64\x65leted\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\r\n\x05title\x18\x06 \x01(\t\x12\x0e\n\x06\x61rtist\x18\x07 \x01(\t\x12\x13\n\x0b\x61rtist_hash\x18. 
\x01(\x03\x12\x10\n\x08\x63omposer\x18\x08 \x01(\t\x12\r\n\x05\x61lbum\x18\t \x01(\t\x12\x14\n\x0c\x61lbum_artist\x18\n \x01(\t\x12\x17\n\x0f\x63\x61nonical_album\x18\x38 \x01(\t\x12\x18\n\x10\x63\x61nonical_artist\x18\x39 \x01(\t\x12\x1d\n\x15\x63\x61nonical_genre_album\x18: \x01(\t\x12\x0c\n\x04year\x18\x0b \x01(\x05\x12\x0f\n\x07\x63omment\x18\x0c \x01(\t\x12\x14\n\x0ctrack_number\x18\r \x01(\x05\x12\r\n\x05genre\x18\x0e \x01(\t\x12\x17\n\x0f\x64uration_millis\x18\x0f \x01(\x03\x12\x18\n\x10\x62\x65\x61ts_per_minute\x18\x10 \x01(\x05\x12\x19\n\x11original_bit_rate\x18, \x01(\x05\x12\x1c\n\taudio_ref\x18\x11 \x03(\x0b\x32\t.AudioRef\x12 \n\ralbum_art_ref\x18\x12 \x03(\x0b\x32\t.ImageRef\x12\x36\n\x13\x61vailability_status\x18\x13 \x01(\x0e\x32\x19.Track.AvailabilityStatus\x12\x12\n\nplay_count\x18\x14 \x01(\x05\x12(\n\x0c\x63ontent_type\x18\x19 \x01(\x0e\x32\x12.Track.ContentType\x12\x19\n\x11total_track_count\x18\x1a \x01(\x05\x12\x13\n\x0b\x64isc_number\x18\x1b \x01(\x05\x12\x18\n\x10total_disc_count\x18\x1c \x01(\x05\x12!\n\x08\x63hannels\x18\x1d \x01(\x0e\x32\x0f.Track.Channels\x12$\n\ntrack_type\x18\x1e \x01(\x0e\x32\x10.Track.TrackType\x12\x1e\n\x16use_single_server_copy\x18; \x01(\x08\x12\x1d\n\x06rating\x18\x1f \x01(\x0e\x32\r.Track.Rating\x12\x16\n\x0e\x65stimated_size\x18 \x01(\x03\x12\x10\n\x08store_id\x18! 
\x01(\t\x12\x12\n\nmetajam_id\x18\" \x01(\t\x12 \n\x15metajam_id_confidence\x18+ \x01(\x01:\x01\x30\x12\x0c\n\x04uits\x18# \x01(\t\x12$\n\ruits_metadata\x18( \x01(\x0b\x32\r.UitsMetadata\x12\x13\n\x0b\x63ompilation\x18$ \x01(\x08\x12\x19\n\x11\x63lient_date_added\x18% \x01(\x03\x12\x18\n\x10recent_timestamp\x18& \x01(\x03\x12\x1d\n\x0e\x64o_not_rematch\x18\' \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x13\x66rom_album_purchase\x18) \x01(\x08\x12\x18\n\x10\x61lbum_metajam_id\x18* \x01(\t\x12\x16\n\x0etransaction_id\x18- \x01(\t\x12\x13\n\x0b\x64\x65\x62ug_track\x18/ \x01(\x08\x12\x18\n\x10normalized_title\x18\x30 \x01(\t\x12\x19\n\x11normalized_artist\x18\x31 \x01(\t\x12\x18\n\x10normalized_album\x18\x32 \x01(\t\x12\x1f\n\x17normalized_album_artist\x18\x33 \x01(\t\x12\"\n\x1anormalized_canonical_album\x18\x36 \x01(\t\x12#\n\x1bnormalized_canonical_artist\x18\x37 \x01(\t\x12\x13\n\x0buploader_id\x18\x34 \x01(\t\x12\x17\n\x0f\x63lient_album_id\x18\x35 \x01(\t\x12\x18\n\x10label_owner_code\x18< \x01(\t\x12\x31\n\x15original_content_type\x18= \x01(\x0e\x32\x12.Track.ContentType\x12*\n\ruploaded_uits\x18G \x03(\x0b\x32\x13.UploadedUitsId3Tag\"\x86\x01\n\x12\x41vailabilityStatus\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07MATCHED\x10\x02\x12\x14\n\x10UPLOAD_REQUESTED\x10\x03\x12\r\n\tAVAILABLE\x10\x04\x12\x12\n\x0e\x46ORCE_REUPLOAD\x10\x05\x12\x1d\n\x19UPLOAD_PERMANENTLY_FAILED\x10\x06\"W\n\x0b\x43ontentType\x12\x07\n\x03MP3\x10\x01\x12\x07\n\x03M4A\x10\x02\x12\x07\n\x03\x41\x41\x43\x10\x03\x12\x08\n\x04\x46LAC\x10\x04\x12\x07\n\x03OGG\x10\x05\x12\x07\n\x03WMA\x10\x06\x12\x07\n\x03M4P\x10\x07\x12\x08\n\x04\x41LAC\x10\x08\" 
\n\x08\x43hannels\x12\x08\n\x04MONO\x10\x01\x12\n\n\x06STEREO\x10\x02\"\x8b\x01\n\tTrackType\x12\x11\n\rMATCHED_TRACK\x10\x01\x12\x13\n\x0fUNMATCHED_TRACK\x10\x02\x12\x0f\n\x0bLOCAL_TRACK\x10\x03\x12\x13\n\x0fPURCHASED_TRACK\x10\x04\x12\x1f\n\x1bMETADATA_ONLY_MATCHED_TRACK\x10\x05\x12\x0f\n\x0bPROMO_TRACK\x10\x06\"e\n\x06Rating\x12\r\n\tNOT_RATED\x10\x01\x12\x0c\n\x08ONE_STAR\x10\x02\x12\r\n\tTWO_STARS\x10\x03\x12\x0f\n\x0bTHREE_STARS\x10\x04\x12\x0e\n\nFOUR_STARS\x10\x05\x12\x0e\n\nFIVE_STARS\x10\x06\"\x1f\n\x06Tracks\x12\x15\n\x05track\x18\x01 \x03(\x0b\x32\x06.Track\"\xb4\x02\n\x08Playlist\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tclient_id\x18\x02 \x01(\t\x12\x1a\n\x12\x63reation_timestamp\x18\x03 \x01(\x03\x12\x1f\n\x17last_modified_timestamp\x18\x04 \x01(\x03\x12\x16\n\x07\x64\x65leted\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x0c\n\x04name\x18\x06 \x01(\t\x12-\n\rplaylist_type\x18\x07 \x01(\x0e\x32\x16.Playlist.PlaylistType\x12r.EnumValueDescriptor(
name='NOT_EQUAL', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='GREATER_THAN', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='GREATER_EQUAL', index=3, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LESS_THAN', index=4, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LESS_EQUAL', index=5, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PARTIAL_MATCH', index=6, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3718,
serialized_end=3847,
)
_TRACKSEARCHRESTRICTIONSET_RESTRICTIONSETTYPE = descriptor.EnumDescriptor(
name='RestrictionSetType',
full_name='TrackSearchRestrictionSet.RestrictionSetType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='AND', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OR', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4031,
serialized_end=4068,
)
_TRACKSORTORDER_TRACKATTRIBUTE = descriptor.EnumDescriptor(
name='TrackAttribute',
full_name='TrackSortOrder.TrackAttribute',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='LAST_MODIFIED_TIME', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='ARTIST', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='ALBUM', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TITLE', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TRACK_NUMBER', index=4, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PLAY_COUNT', index=5, number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DURATION_MILLIS', index=6, number=10,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RATING', index=7, number=11,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CREATION_TIME', index=8, number=12,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4167,
serialized_end=4327,
)
_GETTRACKSREQUEST_TRACKPROJECTION = descriptor.EnumDescriptor(
name='TrackProjection',
full_name='GetTracksRequest.TrackProjection',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='FULL', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='FRONTEND_VIEW', index=1, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4666,
serialized_end=4712,
)
_GETTRACKSRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
name='ResponseCode',
full_name='GetTracksResponse.ResponseCode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='NOT_MODIFIED', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='GONE', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4876,
serialized_end=4926,
)
_GETPLAYLISTENTRIESRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
name='ResponseCode',
full_name='GetPlaylistEntriesResponse.ResponseCode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='NOT_MODIFIED', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='GONE', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4876,
serialized_end=4926,
)
_PLAYLISTSORTORDER_PLAYLISTATTRIBUTE = descriptor.EnumDescriptor(
name='PlaylistAttribute',
full_name='PlaylistSortOrder.PlaylistAttribute',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='LAST_MODIFIED_TIME', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TITLE', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CREATION_TIME', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RECENT_TIMESTAMP', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5538,
serialized_end=5633,
)
_GETPLAYLISTSRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
name='ResponseCode',
full_name='GetPlaylistsResponse.ResponseCode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='NOT_MODIFIED', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='GONE', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4876,
serialized_end=4926,
)
_BATCHLOOKUPREQUEST_METADATATYPE = descriptor.EnumDescriptor(
name='MetadataType',
full_name='BatchLookupRequest.MetadataType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TRACK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PLAYLIST', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PLAYLIST_ENTRY', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6467,
serialized_end=6526,
)
_MUTATERESPONSE_MUTATERESPONSECODE = descriptor.EnumDescriptor(
name='MutateResponseCode',
full_name='MutateResponse.MutateResponseCode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CONFLICT', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='INVALID_REQUEST', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='METADATA_TOO_LARGE', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7047,
serialized_end=7134,
)
_MUTATERESPONSE_AVAILABILITYSTATUS = descriptor.EnumDescriptor(
name='AvailabilityStatus',
full_name='MutateResponse.AvailabilityStatus',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='PENDING', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='MATCHED', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='UPLOAD_REQUESTED', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='AVAILABLE', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='FORCE_REUPLOAD', index=4, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='UPLOAD_PERMANENTLY_FAILED', index=5, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2101,
serialized_end=2235,
)
_BATCHMUTATETRACKSRESPONSE_BATCHMUTATETRACKSRESPONSECODE = descriptor.EnumDescriptor(
name='BatchMutateTracksResponseCode',
full_name='BatchMutateTracksResponse.BatchMutateTracksResponseCode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CONFLICT', index=1, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7634,
serialized_end=7687,
)
_BATCHMUTATEPLAYLISTSRESPONSE_BATCHMUTATEPLAYLISTSRESPONSECODE = descriptor.EnumDescriptor(
name='BatchMutatePlaylistsResponseCode',
full_name='BatchMutatePlaylistsResponse.BatchMutatePlaylistsResponseCode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CONFLICT', index=1, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8319,
serialized_end=8375,
)
_BATCHMUTATEPLAYLISTENTRIESRESPONSE_BATCHMUTATEPLAYLISTENTRIESRESPONSECODE = descriptor.EnumDescriptor(
name='BatchMutatePlaylistEntriesResponseCode',
full_name='BatchMutatePlaylistEntriesResponse.BatchMutatePlaylistEntriesResponseCode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CONFLICT', index=1, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9033,
serialized_end=9095,
)
_MAGICPLAYLISTSEED_SEEDTYPE = descriptor.EnumDescriptor(
name='SeedType',
full_name='MagicPlaylistSeed.SeedType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TRACK', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='ARTIST', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='ALBUM', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OPAQUE_SEED', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9181,
serialized_end=9242,
)
_ALBUMSORTORDER_ALBUMATTRIBUTE = descriptor.EnumDescriptor(
name='AlbumAttribute',
full_name='AlbumSortOrder.AlbumAttribute',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='LAST_PLAYED_TIME', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='NAME', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CREATION_TIME', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=10312,
serialized_end=10379,
)
_GETDYNAMICPLAYLISTENTRIESREQUEST_DYNAMICPLAYLISTENTRIESTYPE = descriptor.EnumDescriptor(
name='DynamicPlaylistEntriesType',
full_name='GetDynamicPlaylistEntriesRequest.DynamicPlaylistEntriesType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='PURCHASED', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='THUMBS_UP', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RECENTLY_ADDED', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PROMOTED', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PROMOTED_AND_PURCHASED', index=4, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=11290,
serialized_end=11410,
)
_GETDYNAMICPLAYLISTENTRIESRESPONSE_DYNAMICPLAYLISTENTRIESTYPE = descriptor.EnumDescriptor(
name='DynamicPlaylistEntriesType',
full_name='GetDynamicPlaylistEntriesResponse.DynamicPlaylistEntriesType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='PURCHASED', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='THUMBS_UP', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RECENTLY_ADDED', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PROMOTED', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='UNKNOWN', index=4, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PROMOTED_AND_PURCHASED', index=5, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=11718,
serialized_end=11851,
)
_GETDYNAMICPLAYLISTENTRIESRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
name='ResponseCode',
full_name='GetDynamicPlaylistEntriesResponse.ResponseCode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='NOT_OK', index=1, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=11853,
serialized_end=11887,
)
_TRACKTYPEAGGREGATE_TRACKTYPE = descriptor.EnumDescriptor(
name='TrackType',
full_name='TrackTypeAggregate.TrackType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='MATCHED_TRACK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='UNMATCHED_TRACK', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LOCAL_TRACK', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PURCHASED_TRACK', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='METADATA_ONLY_MATCHED_TRACK', index=4, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PROMO_TRACK', index=5, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2361,
serialized_end=2500,
)
_AVAILABILITYSTATUSAGGREGATE_AVAILABILITYSTATUS = descriptor.EnumDescriptor(
name='AvailabilityStatus',
full_name='AvailabilityStatusAggregate.AvailabilityStatus',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='PENDING', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='MATCHED', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='UPLOAD_REQUESTED', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='AVAILABLE', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='FORCE_REUPLOAD', index=4, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='UPLOAD_PERMANENTLY_FAILED', index=5, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2101,
serialized_end=2235,
)
_REMOTECONTROLCOMMANDRESPONSE_RESPONSECODE = descriptor.EnumDescriptor(
name='ResponseCode',
full_name='RemoteControlCommandResponse.ResponseCode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OK', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='NO_PUBLISHER', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='INVALID_REQUEST', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PUBLISH_ERROR', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=13274,
serialized_end=13354,
)
_AUDIOREF = descriptor.Descriptor(
name='AudioRef',
full_name='AudioRef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='store', full_name='AudioRef.store', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='ref', full_name='AudioRef.ref', index=1,
number=2, type=12, cpp_type=9, label=2,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='url', full_name='AudioRef.url', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='bit_rate', full_name='AudioRef.bit_rate', index=3,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sample_rate', full_name='AudioRef.sample_rate', index=4,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='downloadable', full_name='AudioRef.downloadable', index=5,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='duration_millis', full_name='AudioRef.duration_millis', index=6,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='rematch_timestamp', full_name='AudioRef.rematch_timestamp', index=7,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='invalid_due_to_wipeout', full_name='AudioRef.invalid_due_to_wipeout', index=8,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_AUDIOREF_STORE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=29,
serialized_end=277,
)
_IMAGEREF = descriptor.Descriptor(
name='ImageRef',
full_name='ImageRef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='store', full_name='ImageRef.store', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='width', full_name='ImageRef.width', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='height', full_name='ImageRef.height', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='url', full_name='ImageRef.url', index=3,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='invalid_due_to_wipeout', full_name='ImageRef.invalid_due_to_wipeout', index=4,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='origin', full_name='ImageRef.origin', index=5,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_IMAGEREF_STORE,
_IMAGEREF_ORIGIN,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=280,
serialized_end=489,
)
_UPLOADEDUITSID3TAG = descriptor.Descriptor(
name='UploadedUitsId3Tag',
full_name='UploadedUitsId3Tag',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='owner', full_name='UploadedUitsId3Tag.owner', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='data', full_name='UploadedUitsId3Tag.data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=491,
serialized_end=540,
)
_TRACK = descriptor.Descriptor(
name='Track',
full_name='Track',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='Track.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_id', full_name='Track.client_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='creation_timestamp', full_name='Track.creation_timestamp', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='last_modified_timestamp', full_name='Track.last_modified_timestamp', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deleted', full_name='Track.deleted', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='title', full_name='Track.title', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='artist', full_name='Track.artist', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='artist_hash', full_name='Track.artist_hash', index=7,
number=46, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='composer', full_name='Track.composer', index=8,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='album', full_name='Track.album', index=9,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='album_artist', full_name='Track.album_artist', index=10,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='canonical_album', full_name='Track.canonical_album', index=11,
number=56, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='canonical_artist', full_name='Track.canonical_artist', index=12,
number=57, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='canonical_genre_album', full_name='Track.canonical_genre_album', index=13,
number=58, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='year', full_name='Track.year', index=14,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='comment', full_name='Track.comment', index=15,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track_number', full_name='Track.track_number', index=16,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='genre', full_name='Track.genre', index=17,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='duration_millis', full_name='Track.duration_millis', index=18,
number=15, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='beats_per_minute', full_name='Track.beats_per_minute', index=19,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='original_bit_rate', full_name='Track.original_bit_rate', index=20,
number=44, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='audio_ref', full_name='Track.audio_ref', index=21,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='album_art_ref', full_name='Track.album_art_ref', index=22,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='availability_status', full_name='Track.availability_status', index=23,
number=19, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='play_count', full_name='Track.play_count', index=24,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='content_type', full_name='Track.content_type', index=25,
number=25, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='total_track_count', full_name='Track.total_track_count', index=26,
number=26, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disc_number', full_name='Track.disc_number', index=27,
number=27, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='total_disc_count', full_name='Track.total_disc_count', index=28,
number=28, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='channels', full_name='Track.channels', index=29,
number=29, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track_type', full_name='Track.track_type', index=30,
number=30, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='use_single_server_copy', full_name='Track.use_single_server_copy', index=31,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='rating', full_name='Track.rating', index=32,
number=31, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='estimated_size', full_name='Track.estimated_size', index=33,
number=32, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='store_id', full_name='Track.store_id', index=34,
number=33, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metajam_id', full_name='Track.metajam_id', index=35,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metajam_id_confidence', full_name='Track.metajam_id_confidence', index=36,
number=43, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uits', full_name='Track.uits', index=37,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uits_metadata', full_name='Track.uits_metadata', index=38,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='compilation', full_name='Track.compilation', index=39,
number=36, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_date_added', full_name='Track.client_date_added', index=40,
number=37, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='recent_timestamp', full_name='Track.recent_timestamp', index=41,
number=38, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='do_not_rematch', full_name='Track.do_not_rematch', index=42,
number=39, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='from_album_purchase', full_name='Track.from_album_purchase', index=43,
number=41, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='album_metajam_id', full_name='Track.album_metajam_id', index=44,
number=42, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='transaction_id', full_name='Track.transaction_id', index=45,
number=45, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='debug_track', full_name='Track.debug_track', index=46,
number=47, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_title', full_name='Track.normalized_title', index=47,
number=48, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_artist', full_name='Track.normalized_artist', index=48,
number=49, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_album', full_name='Track.normalized_album', index=49,
number=50, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_album_artist', full_name='Track.normalized_album_artist', index=50,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_canonical_album', full_name='Track.normalized_canonical_album', index=51,
number=54, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normalized_canonical_artist', full_name='Track.normalized_canonical_artist', index=52,
number=55, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uploader_id', full_name='Track.uploader_id', index=53,
number=52, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_album_id', full_name='Track.client_album_id', index=54,
number=53, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label_owner_code', full_name='Track.label_owner_code', index=55,
number=60, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='original_content_type', full_name='Track.original_content_type', index=56,
number=61, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uploaded_uits', full_name='Track.uploaded_uits', index=57,
number=71, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_TRACK_AVAILABILITYSTATUS,
_TRACK_CONTENTTYPE,
_TRACK_CHANNELS,
_TRACK_TRACKTYPE,
_TRACK_RATING,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=543,
serialized_end=2603,
)
_TRACKS = descriptor.Descriptor(
name='Tracks',
full_name='Tracks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='track', full_name='Tracks.track', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2605,
serialized_end=2636,
)
_PLAYLIST = descriptor.Descriptor(
name='Playlist',
full_name='Playlist',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='Playlist.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_id', full_name='Playlist.client_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='creation_timestamp', full_name='Playlist.creation_timestamp', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='last_modified_timestamp', full_name='Playlist.last_modified_timestamp', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deleted', full_name='Playlist.deleted', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='name', full_name='Playlist.name', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_type', full_name='Playlist.playlist_type', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_art_ref', full_name='Playlist.playlist_art_ref', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='recent_timestamp', full_name='Playlist.recent_timestamp', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_PLAYLIST_PLAYLISTTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2639,
serialized_end=2947,
)
_PLAYLISTENTRY = descriptor.Descriptor(
name='PlaylistEntry',
full_name='PlaylistEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='playlist_id', full_name='PlaylistEntry.playlist_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='absolute_position', full_name='PlaylistEntry.absolute_position', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='place_after_entry_id', full_name='PlaylistEntry.place_after_entry_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track_id', full_name='PlaylistEntry.track_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='id', full_name='PlaylistEntry.id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_id', full_name='PlaylistEntry.client_id', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='creation_timestamp', full_name='PlaylistEntry.creation_timestamp', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='last_modified_timestamp', full_name='PlaylistEntry.last_modified_timestamp', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deleted', full_name='PlaylistEntry.deleted', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='relative_position_id_type', full_name='PlaylistEntry.relative_position_id_type', index=9,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track', full_name='PlaylistEntry.track', index=10,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='place_before_entry_id', full_name='PlaylistEntry.place_before_entry_id', index=11,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='string_position', full_name='PlaylistEntry.string_position', index=12,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_PLAYLISTENTRY_RELATIVEPOSITIONIDTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2950,
serialized_end=3380,
)
_TRACKSEARCHRESTRICTION = descriptor.Descriptor(
name='TrackSearchRestriction',
full_name='TrackSearchRestriction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='attribute', full_name='TrackSearchRestriction.attribute', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='TrackSearchRestriction.value', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='comparison_type', full_name='TrackSearchRestriction.comparison_type', index=2,
number=3, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_TRACKSEARCHRESTRICTION_TRACKATTRIBUTE,
_TRACKSEARCHRESTRICTION_COMPARISONTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3383,
serialized_end=3847,
)
_TRACKSEARCHRESTRICTIONSET = descriptor.Descriptor(
name='TrackSearchRestrictionSet',
full_name='TrackSearchRestrictionSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='type', full_name='TrackSearchRestrictionSet.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='restriction', full_name='TrackSearchRestrictionSet.restriction', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sub_set', full_name='TrackSearchRestrictionSet.sub_set', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_TRACKSEARCHRESTRICTIONSET_RESTRICTIONSETTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3850,
serialized_end=4068,
)
_TRACKSORTORDER = descriptor.Descriptor(
name='TrackSortOrder',
full_name='TrackSortOrder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='attribute', full_name='TrackSortOrder.attribute', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='descending', full_name='TrackSortOrder.descending', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_TRACKSORTORDER_TRACKATTRIBUTE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4071,
serialized_end=4327,
)
_GETTRACKSREQUEST = descriptor.Descriptor(
name='GetTracksRequest',
full_name='GetTracksRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='GetTracksRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='updated_min', full_name='GetTracksRequest.updated_min', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='include_deleted', full_name='GetTracksRequest.include_deleted', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_results', full_name='GetTracksRequest.max_results', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='continuation_token', full_name='GetTracksRequest.continuation_token', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='search_restriction', full_name='GetTracksRequest.search_restriction', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sort_order', full_name='GetTracksRequest.sort_order', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='restriction_set', full_name='GetTracksRequest.restriction_set', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track_projection', full_name='GetTracksRequest.track_projection', index=8,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETTRACKSREQUEST_TRACKPROJECTION,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4330,
serialized_end=4712,
)
_GETTRACKSRESPONSE = descriptor.Descriptor(
name='GetTracksResponse',
full_name='GetTracksResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='GetTracksResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track', full_name='GetTracksResponse.track', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='estimated_total_results', full_name='GetTracksResponse.estimated_total_results', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='continuation_token', full_name='GetTracksResponse.continuation_token', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETTRACKSRESPONSE_RESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4715,
serialized_end=4926,
)
_GETPLAYLISTENTRIESREQUEST = descriptor.Descriptor(
name='GetPlaylistEntriesRequest',
full_name='GetPlaylistEntriesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='GetPlaylistEntriesRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='updated_min', full_name='GetPlaylistEntriesRequest.updated_min', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='include_deleted', full_name='GetPlaylistEntriesRequest.include_deleted', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_results', full_name='GetPlaylistEntriesRequest.max_results', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='continuation_token', full_name='GetPlaylistEntriesRequest.continuation_token', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_id_filter', full_name='GetPlaylistEntriesRequest.playlist_id_filter', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='include_all_track_metadata', full_name='GetPlaylistEntriesRequest.include_all_track_metadata', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='only_show_available_tracks', full_name='GetPlaylistEntriesRequest.only_show_available_tracks', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4929,
serialized_end=5181,
)
_GETPLAYLISTENTRIESRESPONSE = descriptor.Descriptor(
name='GetPlaylistEntriesResponse',
full_name='GetPlaylistEntriesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='GetPlaylistEntriesResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry', full_name='GetPlaylistEntriesResponse.playlist_entry', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='estimated_total_results', full_name='GetPlaylistEntriesResponse.estimated_total_results', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='continuation_token', full_name='GetPlaylistEntriesResponse.continuation_token', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETPLAYLISTENTRIESRESPONSE_RESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5184,
serialized_end=5430,
)
_PLAYLISTSORTORDER = descriptor.Descriptor(
name='PlaylistSortOrder',
full_name='PlaylistSortOrder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='attribute', full_name='PlaylistSortOrder.attribute', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='descending', full_name='PlaylistSortOrder.descending', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_PLAYLISTSORTORDER_PLAYLISTATTRIBUTE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5433,
serialized_end=5633,
)
_GETPLAYLISTSREQUEST = descriptor.Descriptor(
name='GetPlaylistsRequest',
full_name='GetPlaylistsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='GetPlaylistsRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='updated_min', full_name='GetPlaylistsRequest.updated_min', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='include_deleted', full_name='GetPlaylistsRequest.include_deleted', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_results', full_name='GetPlaylistsRequest.max_results', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='continuation_token', full_name='GetPlaylistsRequest.continuation_token', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sort_order', full_name='GetPlaylistsRequest.sort_order', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5636,
serialized_end=5809,
)
_GETPLAYLISTSRESPONSE = descriptor.Descriptor(
name='GetPlaylistsResponse',
full_name='GetPlaylistsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='GetPlaylistsResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist', full_name='GetPlaylistsResponse.playlist', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='estimated_total_results', full_name='GetPlaylistsResponse.estimated_total_results', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='continuation_token', full_name='GetPlaylistsResponse.continuation_token', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETPLAYLISTSRESPONSE_RESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5812,
serialized_end=6035,
)
_LOOKUPTRACKREQUEST = descriptor.Descriptor(
name='LookupTrackRequest',
full_name='LookupTrackRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='LookupTrackRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_id', full_name='LookupTrackRequest.client_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6037,
serialized_end=6088,
)
_LOOKUPPLAYLISTENTRYREQUEST = descriptor.Descriptor(
name='LookupPlaylistEntryRequest',
full_name='LookupPlaylistEntryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='LookupPlaylistEntryRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_id', full_name='LookupPlaylistEntryRequest.client_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6090,
serialized_end=6149,
)
_LOOKUPPLAYLISTREQUEST = descriptor.Descriptor(
name='LookupPlaylistRequest',
full_name='LookupPlaylistRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='LookupPlaylistRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_id', full_name='LookupPlaylistRequest.client_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6151,
serialized_end=6205,
)
_BATCHLOOKUPREQUEST = descriptor.Descriptor(
name='BatchLookupRequest',
full_name='BatchLookupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='BatchLookupRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track', full_name='BatchLookupRequest.track', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist', full_name='BatchLookupRequest.playlist', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metadata_type', full_name='BatchLookupRequest.metadata_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry', full_name='BatchLookupRequest.playlist_entry', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='include_deleted', full_name='BatchLookupRequest.include_deleted', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BATCHLOOKUPREQUEST_METADATATYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6208,
serialized_end=6526,
)
_BATCHLOOKUPRESPONSE = descriptor.Descriptor(
name='BatchLookupResponse',
full_name='BatchLookupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='track', full_name='BatchLookupResponse.track', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist', full_name='BatchLookupResponse.playlist', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry', full_name='BatchLookupResponse.playlist_entry', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6528,
serialized_end=6641,
)
_MUTATETRACKREQUEST = descriptor.Descriptor(
name='MutateTrackRequest',
full_name='MutateTrackRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='create_track', full_name='MutateTrackRequest.create_track', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_track', full_name='MutateTrackRequest.update_track', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='delete_track', full_name='MutateTrackRequest.delete_track', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='partial_update', full_name='MutateTrackRequest.partial_update', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_last_modified', full_name='MutateTrackRequest.update_last_modified', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='undelete_track', full_name='MutateTrackRequest.undelete_track', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6644,
serialized_end=6830,
)
_MUTATERESPONSE = descriptor.Descriptor(
name='MutateResponse',
full_name='MutateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='MutateResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='id', full_name='MutateResponse.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='child_id', full_name='MutateResponse.child_id', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_id', full_name='MutateResponse.client_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='availability_status', full_name='MutateResponse.availability_status', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='error_message', full_name='MutateResponse.error_message', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MUTATERESPONSE_MUTATERESPONSECODE,
_MUTATERESPONSE_AVAILABILITYSTATUS,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6833,
serialized_end=7271,
)
_BATCHMUTATETRACKSREQUEST = descriptor.Descriptor(
name='BatchMutateTracksRequest',
full_name='BatchMutateTracksRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='BatchMutateTracksRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track_mutation', full_name='BatchMutateTracksRequest.track_mutation', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='send_notification', full_name='BatchMutateTracksRequest.send_notification', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='detect_timestamp_conflict', full_name='BatchMutateTracksRequest.detect_timestamp_conflict', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='notify_fine_grained_updates', full_name='BatchMutateTracksRequest.notify_fine_grained_updates', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=7274,
serialized_end=7479,
)
_BATCHMUTATETRACKSRESPONSE = descriptor.Descriptor(
name='BatchMutateTracksResponse',
full_name='BatchMutateTracksResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='BatchMutateTracksResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mutate_response', full_name='BatchMutateTracksResponse.mutate_response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BATCHMUTATETRACKSRESPONSE_BATCHMUTATETRACKSRESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=7482,
serialized_end=7687,
)
_MUTATEPLAYLISTREQUEST = descriptor.Descriptor(
name='MutatePlaylistRequest',
full_name='MutatePlaylistRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='create_playlist', full_name='MutatePlaylistRequest.create_playlist', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_playlist', full_name='MutatePlaylistRequest.update_playlist', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='delete_playlist', full_name='MutatePlaylistRequest.delete_playlist', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='partial_update', full_name='MutatePlaylistRequest.partial_update', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry', full_name='MutatePlaylistRequest.playlist_entry', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_last_modified', full_name='MutatePlaylistRequest.update_last_modified', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='undelete_playlist', full_name='MutatePlaylistRequest.undelete_playlist', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=7690,
serialized_end=7937,
)
_BATCHMUTATEPLAYLISTSREQUEST = descriptor.Descriptor(
name='BatchMutatePlaylistsRequest',
full_name='BatchMutatePlaylistsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='BatchMutatePlaylistsRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_mutation', full_name='BatchMutatePlaylistsRequest.playlist_mutation', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='send_notification', full_name='BatchMutatePlaylistsRequest.send_notification', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='detect_timestamp_conflict', full_name='BatchMutatePlaylistsRequest.detect_timestamp_conflict', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='notify_fine_grained_updates', full_name='BatchMutatePlaylistsRequest.notify_fine_grained_updates', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=7940,
serialized_end=8155,
)
_BATCHMUTATEPLAYLISTSRESPONSE = descriptor.Descriptor(
name='BatchMutatePlaylistsResponse',
full_name='BatchMutatePlaylistsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='BatchMutatePlaylistsResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mutate_response', full_name='BatchMutatePlaylistsResponse.mutate_response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BATCHMUTATEPLAYLISTSRESPONSE_BATCHMUTATEPLAYLISTSRESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=8158,
serialized_end=8375,
)
_MUTATEPLAYLISTENTRYREQUEST = descriptor.Descriptor(
name='MutatePlaylistEntryRequest',
full_name='MutatePlaylistEntryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='create_playlist_entry', full_name='MutatePlaylistEntryRequest.create_playlist_entry', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_playlist_entry', full_name='MutatePlaylistEntryRequest.update_playlist_entry', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='delete_playlist_entry', full_name='MutatePlaylistEntryRequest.delete_playlist_entry', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_last_modified', full_name='MutatePlaylistEntryRequest.update_last_modified', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='undelete_playlist_entry', full_name='MutatePlaylistEntryRequest.undelete_playlist_entry', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=8378,
serialized_end=8616,
)
_BATCHMUTATEPLAYLISTENTRIESREQUEST = descriptor.Descriptor(
name='BatchMutatePlaylistEntriesRequest',
full_name='BatchMutatePlaylistEntriesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='BatchMutatePlaylistEntriesRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_entry_mutation', full_name='BatchMutatePlaylistEntriesRequest.playlist_entry_mutation', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='send_notification', full_name='BatchMutatePlaylistEntriesRequest.send_notification', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='detect_timestamp_conflict', full_name='BatchMutatePlaylistEntriesRequest.detect_timestamp_conflict', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='notify_fine_grained_updates', full_name='BatchMutatePlaylistEntriesRequest.notify_fine_grained_updates', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=8619,
serialized_end=8851,
)
_BATCHMUTATEPLAYLISTENTRIESRESPONSE = descriptor.Descriptor(
name='BatchMutatePlaylistEntriesResponse',
full_name='BatchMutatePlaylistEntriesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='BatchMutatePlaylistEntriesResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mutate_response', full_name='BatchMutatePlaylistEntriesResponse.mutate_response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BATCHMUTATEPLAYLISTENTRIESRESPONSE_BATCHMUTATEPLAYLISTENTRIESRESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=8854,
serialized_end=9095,
)
_MAGICPLAYLISTSEED = descriptor.Descriptor(
name='MagicPlaylistSeed',
full_name='MagicPlaylistSeed',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='seed_type', full_name='MagicPlaylistSeed.seed_type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='seed', full_name='MagicPlaylistSeed.seed', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MAGICPLAYLISTSEED_SEEDTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=9098,
serialized_end=9242,
)
_MAGICPLAYLISTREQUEST = descriptor.Descriptor(
name='MagicPlaylistRequest',
full_name='MagicPlaylistRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='MagicPlaylistRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_name', full_name='MagicPlaylistRequest.playlist_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playlist_id', full_name='MagicPlaylistRequest.playlist_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='seed', full_name='MagicPlaylistRequest.seed', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='num_recommendations', full_name='MagicPlaylistRequest.num_recommendations', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='include_all_track_metadata', full_name='MagicPlaylistRequest.include_all_track_metadata', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='model_name', full_name='MagicPlaylistRequest.model_name', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=9245,
serialized_end=9454,
)
# Generated protobuf message descriptor (protoc, legacy descriptor API; Python 2 —
# note unicode() defaults elsewhere in this file). DO NOT EDIT BY HAND: the numeric
# type/cpp_type/label codes and serialized_start/serialized_end offsets must match
# the serialized file descriptor, so regenerate from the .proto instead.
_MAGICPLAYLISTRESPONSE = descriptor.Descriptor(
  name='MagicPlaylistResponse',
  full_name='MagicPlaylistResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # Optional message field (type=11): the generated playlist itself.
    descriptor.FieldDescriptor(
      name='playlist', full_name='MagicPlaylistResponse.playlist', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Repeated message field (label=3): the playlist's entries.
    descriptor.FieldDescriptor(
      name='playlist_entry', full_name='MagicPlaylistResponse.playlist_entry', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=9456,
  serialized_end=9548,
)
# Generated descriptors for the locker-flush RPC pair plus its notification
# message. Auto-generated by protoc — do not hand-edit; regenerate from the .proto.
_FLUSHLOCKERREQUEST = descriptor.Descriptor(
  name='FlushLockerRequest',
  full_name='FlushLockerRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='FlushLockerRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='gaia_cookie', full_name='FlushLockerRequest.gaia_cookie', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # The following bool flags carry explicit proto defaults (has_default_value=True).
    descriptor.FieldDescriptor(
      name='remove_audio_binaries', full_name='FlushLockerRequest.remove_audio_binaries', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=True,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='remove_image_binaries', full_name='FlushLockerRequest.remove_image_binaries', index=3,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=True,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='send_notification', full_name='FlushLockerRequest.send_notification', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=True,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='reset_subscription_type', full_name='FlushLockerRequest.reset_subscription_type', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # NOTE: field number jumps from 6 to 8 — number 7 is presumably reserved or
    # retired in the .proto; confirm against the source schema before reusing it.
    descriptor.FieldDescriptor(
      name='notify_fine_grained_updates', full_name='FlushLockerRequest.notify_fine_grained_updates', index=6,
      number=8, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=True,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=9551,
  serialized_end=9799,
)
# Response counters: how many tracks/entries/playlists the flush removed.
_FLUSHLOCKERRESPONSE = descriptor.Descriptor(
  name='FlushLockerResponse',
  full_name='FlushLockerResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='tracks_removed', full_name='FlushLockerResponse.tracks_removed', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='entries_removed', full_name='FlushLockerResponse.entries_removed', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlists_removed', full_name='FlushLockerResponse.playlists_removed', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='success_reset_subscription_type', full_name='FlushLockerResponse.success_reset_subscription_type', index=3,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=9802,
  serialized_end=9940,
)
# Notification envelope: required gaia_id (label=2) plus an opaque bytes payload (type=12).
_LOCKERNOTIFICATION = descriptor.Descriptor(
  name='LockerNotification',
  full_name='LockerNotification',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='LockerNotification.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='payload', full_name='LockerNotification.payload', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value="",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=9942,
  serialized_end=9996,
)
# Generated descriptors for the Album aggregate and its GetAlbums request/response
# RPC messages. Machine-generated by protoc — regenerate from the .proto, do not edit.
_ALBUM = descriptor.Descriptor(
  name='Album',
  full_name='Album',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='name', full_name='Album.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album_artist', full_name='Album.album_artist', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Optional sub-message (type=11) holding the album art reference.
    descriptor.FieldDescriptor(
      name='album_art', full_name='Album.album_art', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='track_count', full_name='Album.track_count', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='last_time_played', full_name='Album.last_time_played', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='is_compilation', full_name='Album.is_compilation', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album_metajam_id', full_name='Album.album_metajam_id', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='creation_timestamp', full_name='Album.creation_timestamp', index=7,
      number=8, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='artist', full_name='Album.artist', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=9999,
  serialized_end=10213,
)
# Sort specification for album listings; the sortable attribute comes from the
# nested AlbumAttribute enum (declared earlier in this file).
_ALBUMSORTORDER = descriptor.Descriptor(
  name='AlbumSortOrder',
  full_name='AlbumSortOrder',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='attribute', full_name='AlbumSortOrder.attribute', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='descending', full_name='AlbumSortOrder.descending', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _ALBUMSORTORDER_ALBUMATTRIBUTE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10216,
  serialized_end=10379,
)
_GETALBUMSREQUEST = descriptor.Descriptor(
  name='GetAlbumsRequest',
  full_name='GetAlbumsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetAlbumsRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sort_order', full_name='GetAlbumsRequest.sort_order', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetAlbumsRequest.max_results', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10381,
  serialized_end=10474,
)
_GETALBUMSRESPONSE = descriptor.Descriptor(
  name='GetAlbumsResponse',
  full_name='GetAlbumsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='album', full_name='GetAlbumsResponse.album', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10476,
  serialized_end=10518,
)
# Generated descriptors for the Artist aggregate and its GetArtists RPC messages.
# Same structural pattern as the Album family above. Machine-generated — do not edit.
_ARTIST = descriptor.Descriptor(
  name='Artist',
  full_name='Artist',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='name', full_name='Artist.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='total_track_count', full_name='Artist.total_track_count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Repeated sub-message: the artist's albums.
    descriptor.FieldDescriptor(
      name='album', full_name='Artist.album', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10520,
  serialized_end=10592,
)
# NOTE: only a 'descending' flag — unlike AlbumSortOrder there is no sortable
# attribute enum here, and its field number starts at 2 (number 1 unused).
_ARTISTSORTORDER = descriptor.Descriptor(
  name='ArtistSortOrder',
  full_name='ArtistSortOrder',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='descending', full_name='ArtistSortOrder.descending', index=0,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10594,
  serialized_end=10638,
)
_GETARTISTSREQUEST = descriptor.Descriptor(
  name='GetArtistsRequest',
  full_name='GetArtistsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetArtistsRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sort_order', full_name='GetArtistsRequest.sort_order', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetArtistsRequest.max_results', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10640,
  serialized_end=10735,
)
_GETARTISTSRESPONSE = descriptor.Descriptor(
  name='GetArtistsResponse',
  full_name='GetArtistsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='artist', full_name='GetArtistsResponse.artist', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10737,
  serialized_end=10782,
)
# Generated descriptors for the MusicGenre aggregate and its GetGenres RPC
# messages — mirrors the Artist family exactly. Machine-generated; do not edit.
_MUSICGENRE = descriptor.Descriptor(
  name='MusicGenre',
  full_name='MusicGenre',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='name', full_name='MusicGenre.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='total_track_count', full_name='MusicGenre.total_track_count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album', full_name='MusicGenre.album', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10784,
  serialized_end=10860,
)
_GENRESORTORDER = descriptor.Descriptor(
  name='GenreSortOrder',
  full_name='GenreSortOrder',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='descending', full_name='GenreSortOrder.descending', index=0,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10862,
  serialized_end=10905,
)
_GETGENRESREQUEST = descriptor.Descriptor(
  name='GetGenresRequest',
  full_name='GetGenresRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetGenresRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='sort_order', full_name='GetGenresRequest.sort_order', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetGenresRequest.max_results', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=10907,
  serialized_end=11000,
)
_GETGENRESRESPONSE = descriptor.Descriptor(
  name='GetGenresResponse',
  full_name='GetGenresResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='genre', full_name='GetGenresResponse.genre', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11002,
  serialized_end=11049,
)
# Generated descriptors for the paginated dynamic-playlist-entries RPC.
# Pagination is token-based (continuation_token in both request and response).
# Machine-generated by protoc — do not hand-edit.
_GETDYNAMICPLAYLISTENTRIESREQUEST = descriptor.Descriptor(
  name='GetDynamicPlaylistEntriesRequest',
  full_name='GetDynamicPlaylistEntriesRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetDynamicPlaylistEntriesRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Required enum (label=2); note field number 4 sorts out of declaration order.
    descriptor.FieldDescriptor(
      name='playlist_entries_type', full_name='GetDynamicPlaylistEntriesRequest.playlist_entries_type', index=1,
      number=4, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetDynamicPlaylistEntriesRequest.max_results', index=2,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetDynamicPlaylistEntriesRequest.continuation_token', index=3,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='include_all_track_metadata', full_name='GetDynamicPlaylistEntriesRequest.include_all_track_metadata', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETDYNAMICPLAYLISTENTRIESREQUEST_DYNAMICPLAYLISTENTRIESTYPE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11052,
  serialized_end=11410,
)
_GETDYNAMICPLAYLISTENTRIESRESPONSE = descriptor.Descriptor(
  name='GetDynamicPlaylistEntriesResponse',
  full_name='GetDynamicPlaylistEntriesResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='response_code', full_name='GetDynamicPlaylistEntriesResponse.response_code', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist_entry', full_name='GetDynamicPlaylistEntriesResponse.playlist_entry', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='estimated_total_results', full_name='GetDynamicPlaylistEntriesResponse.estimated_total_results', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='continuation_token', full_name='GetDynamicPlaylistEntriesResponse.continuation_token', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='playlist_entries_type', full_name='GetDynamicPlaylistEntriesResponse.playlist_entries_type', index=4,
      number=5, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETDYNAMICPLAYLISTENTRIESRESPONSE_DYNAMICPLAYLISTENTRIESTYPE,
    _GETDYNAMICPLAYLISTENTRIESRESPONSE_RESPONSECODE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11413,
  serialized_end=11887,
)
# Generated descriptors for the two aggregation RPCs: counts grouped by track
# type and by availability status. Machine-generated by protoc — do not edit.
_GETAGGREGATIONSBYTRACKTYPEREQUEST = descriptor.Descriptor(
  name='GetAggregationsByTrackTypeRequest',
  full_name='GetAggregationsByTrackTypeRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetAggregationsByTrackTypeRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11889,
  serialized_end=11941,
)
# One (track-type enum value, count) pair of the aggregation result.
_TRACKTYPEAGGREGATE = descriptor.Descriptor(
  name='TrackTypeAggregate',
  full_name='TrackTypeAggregate',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='track_type_value', full_name='TrackTypeAggregate.track_type_value', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='count', full_name='TrackTypeAggregate.count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _TRACKTYPEAGGREGATE_TRACKTYPE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=11944,
  serialized_end=12178,
)
_GETAGGREGATIONSBYTRACKTYPERESPONSE = descriptor.Descriptor(
  name='GetAggregationsByTrackTypeResponse',
  full_name='GetAggregationsByTrackTypeResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='track_type_aggregate', full_name='GetAggregationsByTrackTypeResponse.track_type_aggregate', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12180,
  serialized_end=12267,
)
_GETAGGREGATIONSBYAVAILABILITYSTATUSREQUEST = descriptor.Descriptor(
  name='GetAggregationsByAvailabilityStatusRequest',
  full_name='GetAggregationsByAvailabilityStatusRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetAggregationsByAvailabilityStatusRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12269,
  serialized_end=12330,
)
# One (availability-status enum value, count) pair of the aggregation result.
_AVAILABILITYSTATUSAGGREGATE = descriptor.Descriptor(
  name='AvailabilityStatusAggregate',
  full_name='AvailabilityStatusAggregate',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='availability_status', full_name='AvailabilityStatusAggregate.availability_status', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='count', full_name='AvailabilityStatusAggregate.count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _AVAILABILITYSTATUSAGGREGATE_AVAILABILITYSTATUS,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12333,
  serialized_end=12592,
)
_GETAGGREGATIONSBYAVAILABILITYSTATUSRESPONSE = descriptor.Descriptor(
  name='GetAggregationsByAvailabilityStatusResponse',
  full_name='GetAggregationsByAvailabilityStatusResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='availability_status_aggregate', full_name='GetAggregationsByAvailabilityStatusResponse.availability_status_aggregate', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12594,
  serialized_end=12708,
)
# Generated descriptors for the AddPromoTracks RPC: request carries a required
# gaia_id plus repeated genre strings; response returns the tracks added.
# Machine-generated by protoc — do not hand-edit.
_ADDPROMOTRACKSREQUEST = descriptor.Descriptor(
  name='AddPromoTracksRequest',
  full_name='AddPromoTracksRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='AddPromoTracksRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='genre', full_name='AddPromoTracksRequest.genre', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12710,
  serialized_end=12765,
)
_ADDPROMOTRACKSRESPONSE = descriptor.Descriptor(
  name='AddPromoTracksResponse',
  full_name='AddPromoTracksResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='track', full_name='AddPromoTracksResponse.track', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12767,
  serialized_end=12814,
)
# Generated descriptors for the playlist-aggregation RPC and its per-playlist
# summary row. Machine-generated by protoc — do not hand-edit.
_GETPLAYLISTAGGREGATIONSREQUEST = descriptor.Descriptor(
  name='GetPlaylistAggregationsRequest',
  full_name='GetPlaylistAggregationsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='gaia_id', full_name='GetPlaylistAggregationsRequest.gaia_id', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Unlike the other listing RPCs here, max_results has an explicit proto
    # default (14) rather than 0.
    descriptor.FieldDescriptor(
      name='max_results', full_name='GetPlaylistAggregationsRequest.max_results', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=14,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12816,
  serialized_end=12890,
)
_PLAYLISTAGGREGATE = descriptor.Descriptor(
  name='PlaylistAggregate',
  full_name='PlaylistAggregate',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='playlist_id', full_name='PlaylistAggregate.playlist_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='name', full_name='PlaylistAggregate.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='album_art', full_name='PlaylistAggregate.album_art', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # int64 here (type=3), unlike Album.track_count which is int32 (type=5).
    descriptor.FieldDescriptor(
      name='track_count', full_name='PlaylistAggregate.track_count', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='last_time_played', full_name='PlaylistAggregate.last_time_played', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=12893,
  serialized_end=13024,
)
_GETPLAYLISTAGGREGATIONSRESPONSE = descriptor.Descriptor(
  name='GetPlaylistAggregationsResponse',
  full_name='GetPlaylistAggregationsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='playlist_aggregate', full_name='GetPlaylistAggregationsResponse.playlist_aggregate', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=13026,
  serialized_end=13107,
)
_REMOTECONTROLCOMMANDREQUEST = descriptor.Descriptor(
name='RemoteControlCommandRequest',
full_name='RemoteControlCommandRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='gaia_id', full_name='RemoteControlCommandRequest.gaia_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='command', full_name='RemoteControlCommandRequest.command', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=13109,
serialized_end=13172,
)
_REMOTECONTROLCOMMANDRESPONSE = descriptor.Descriptor(
name='RemoteControlCommandResponse',
full_name='RemoteControlCommandResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='response_code', full_name='RemoteControlCommandResponse.response_code', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_REMOTECONTROLCOMMANDRESPONSE_RESPONSECODE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=13175,
serialized_end=13354,
)
_AUDIOREF.fields_by_name['store'].enum_type = _AUDIOREF_STORE
_AUDIOREF_STORE.containing_type = _AUDIOREF;
_IMAGEREF.fields_by_name['store'].enum_type = _IMAGEREF_STORE
_IMAGEREF.fields_by_name['origin'].enum_type = _IMAGEREF_ORIGIN
_IMAGEREF_STORE.containing_type = _IMAGEREF;
_IMAGEREF_ORIGIN.containing_type = _IMAGEREF;
_TRACK.fields_by_name['audio_ref'].message_type = _AUDIOREF
_TRACK.fields_by_name['album_art_ref'].message_type = _IMAGEREF
_TRACK.fields_by_name['availability_status'].enum_type = _TRACK_AVAILABILITYSTATUS
_TRACK.fields_by_name['content_type'].enum_type = _TRACK_CONTENTTYPE
_TRACK.fields_by_name['channels'].enum_type = _TRACK_CHANNELS
_TRACK.fields_by_name['track_type'].enum_type = _TRACK_TRACKTYPE
_TRACK.fields_by_name['rating'].enum_type = _TRACK_RATING
_TRACK.fields_by_name['uits_metadata'].message_type = uits_pb2._UITSMETADATA
_TRACK.fields_by_name['original_content_type'].enum_type = _TRACK_CONTENTTYPE
_TRACK.fields_by_name['uploaded_uits'].message_type = _UPLOADEDUITSID3TAG
_TRACK_AVAILABILITYSTATUS.containing_type = _TRACK;
_TRACK_CONTENTTYPE.containing_type = _TRACK;
_TRACK_CHANNELS.containing_type = _TRACK;
_TRACK_TRACKTYPE.containing_type = _TRACK;
_TRACK_RATING.containing_type = _TRACK;
_TRACKS.fields_by_name['track'].message_type = _TRACK
_PLAYLIST.fields_by_name['playlist_type'].enum_type = _PLAYLIST_PLAYLISTTYPE
_PLAYLIST.fields_by_name['playlist_art_ref'].message_type = _IMAGEREF
_PLAYLIST_PLAYLISTTYPE.containing_type = _PLAYLIST;
_PLAYLISTENTRY.fields_by_name['relative_position_id_type'].enum_type = _PLAYLISTENTRY_RELATIVEPOSITIONIDTYPE
_PLAYLISTENTRY.fields_by_name['track'].message_type = _TRACK
_PLAYLISTENTRY_RELATIVEPOSITIONIDTYPE.containing_type = _PLAYLISTENTRY;
_TRACKSEARCHRESTRICTION.fields_by_name['attribute'].enum_type = _TRACKSEARCHRESTRICTION_TRACKATTRIBUTE
_TRACKSEARCHRESTRICTION.fields_by_name['comparison_type'].enum_type = _TRACKSEARCHRESTRICTION_COMPARISONTYPE
_TRACKSEARCHRESTRICTION_TRACKATTRIBUTE.containing_type = _TRACKSEARCHRESTRICTION;
_TRACKSEARCHRESTRICTION_COMPARISONTYPE.containing_type = _TRACKSEARCHRESTRICTION;
_TRACKSEARCHRESTRICTIONSET.fields_by_name['type'].enum_type = _TRACKSEARCHRESTRICTIONSET_RESTRICTIONSETTYPE
_TRACKSEARCHRESTRICTIONSET.fields_by_name['restriction'].message_type = _TRACKSEARCHRESTRICTION
_TRACKSEARCHRESTRICTIONSET.fields_by_name['sub_set'].message_type = _TRACKSEARCHRESTRICTIONSET
_TRACKSEARCHRESTRICTIONSET_RESTRICTIONSETTYPE.containing_type = _TRACKSEARCHRESTRICTIONSET;
_TRACKSORTORDER.fields_by_name['attribute'].enum_type = _TRACKSORTORDER_TRACKATTRIBUTE
_TRACKSORTORDER_TRACKATTRIBUTE.containing_type = _TRACKSORTORDER;
_GETTRACKSREQUEST.fields_by_name['search_restriction'].message_type = _TRACKSEARCHRESTRICTION
_GETTRACKSREQUEST.fields_by_name['sort_order'].message_type = _TRACKSORTORDER
_GETTRACKSREQUEST.fields_by_name['restriction_set'].message_type = _TRACKSEARCHRESTRICTIONSET
_GETTRACKSREQUEST.fields_by_name['track_projection'].enum_type = _GETTRACKSREQUEST_TRACKPROJECTION
_GETTRACKSREQUEST_TRACKPROJECTION.containing_type = _GETTRACKSREQUEST;
_GETTRACKSRESPONSE.fields_by_name['response_code'].enum_type = _GETTRACKSRESPONSE_RESPONSECODE
_GETTRACKSRESPONSE.fields_by_name['track'].message_type = _TRACK
_GETTRACKSRESPONSE_RESPONSECODE.containing_type = _GETTRACKSRESPONSE;
_GETPLAYLISTENTRIESRESPONSE.fields_by_name['response_code'].enum_type = _GETPLAYLISTENTRIESRESPONSE_RESPONSECODE
_GETPLAYLISTENTRIESRESPONSE.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_GETPLAYLISTENTRIESRESPONSE_RESPONSECODE.containing_type = _GETPLAYLISTENTRIESRESPONSE;
_PLAYLISTSORTORDER.fields_by_name['attribute'].enum_type = _PLAYLISTSORTORDER_PLAYLISTATTRIBUTE
_PLAYLISTSORTORDER_PLAYLISTATTRIBUTE.containing_type = _PLAYLISTSORTORDER;
_GETPLAYLISTSREQUEST.fields_by_name['sort_order'].message_type = _PLAYLISTSORTORDER
_GETPLAYLISTSRESPONSE.fields_by_name['response_code'].enum_type = _GETPLAYLISTSRESPONSE_RESPONSECODE
_GETPLAYLISTSRESPONSE.fields_by_name['playlist'].message_type = _PLAYLIST
_GETPLAYLISTSRESPONSE_RESPONSECODE.containing_type = _GETPLAYLISTSRESPONSE;
_BATCHLOOKUPREQUEST.fields_by_name['track'].message_type = _LOOKUPTRACKREQUEST
_BATCHLOOKUPREQUEST.fields_by_name['playlist'].message_type = _LOOKUPPLAYLISTREQUEST
_BATCHLOOKUPREQUEST.fields_by_name['metadata_type'].enum_type = _BATCHLOOKUPREQUEST_METADATATYPE
_BATCHLOOKUPREQUEST.fields_by_name['playlist_entry'].message_type = _LOOKUPPLAYLISTENTRYREQUEST
_BATCHLOOKUPREQUEST_METADATATYPE.containing_type = _BATCHLOOKUPREQUEST;
_BATCHLOOKUPRESPONSE.fields_by_name['track'].message_type = _TRACK
_BATCHLOOKUPRESPONSE.fields_by_name['playlist'].message_type = _PLAYLIST
_BATCHLOOKUPRESPONSE.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_MUTATETRACKREQUEST.fields_by_name['create_track'].message_type = _TRACK
_MUTATETRACKREQUEST.fields_by_name['update_track'].message_type = _TRACK
_MUTATERESPONSE.fields_by_name['response_code'].enum_type = _MUTATERESPONSE_MUTATERESPONSECODE
_MUTATERESPONSE.fields_by_name['availability_status'].enum_type = _MUTATERESPONSE_AVAILABILITYSTATUS
_MUTATERESPONSE_MUTATERESPONSECODE.containing_type = _MUTATERESPONSE;
_MUTATERESPONSE_AVAILABILITYSTATUS.containing_type = _MUTATERESPONSE;
_BATCHMUTATETRACKSREQUEST.fields_by_name['track_mutation'].message_type = _MUTATETRACKREQUEST
_BATCHMUTATETRACKSRESPONSE.fields_by_name['response_code'].enum_type = _BATCHMUTATETRACKSRESPONSE_BATCHMUTATETRACKSRESPONSECODE
_BATCHMUTATETRACKSRESPONSE.fields_by_name['mutate_response'].message_type = _MUTATERESPONSE
_BATCHMUTATETRACKSRESPONSE_BATCHMUTATETRACKSRESPONSECODE.containing_type = _BATCHMUTATETRACKSRESPONSE;
_MUTATEPLAYLISTREQUEST.fields_by_name['create_playlist'].message_type = _PLAYLIST
_MUTATEPLAYLISTREQUEST.fields_by_name['update_playlist'].message_type = _PLAYLIST
_MUTATEPLAYLISTREQUEST.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_BATCHMUTATEPLAYLISTSREQUEST.fields_by_name['playlist_mutation'].message_type = _MUTATEPLAYLISTREQUEST
_BATCHMUTATEPLAYLISTSRESPONSE.fields_by_name['response_code'].enum_type = _BATCHMUTATEPLAYLISTSRESPONSE_BATCHMUTATEPLAYLISTSRESPONSECODE
_BATCHMUTATEPLAYLISTSRESPONSE.fields_by_name['mutate_response'].message_type = _MUTATERESPONSE
_BATCHMUTATEPLAYLISTSRESPONSE_BATCHMUTATEPLAYLISTSRESPONSECODE.containing_type = _BATCHMUTATEPLAYLISTSRESPONSE;
_MUTATEPLAYLISTENTRYREQUEST.fields_by_name['create_playlist_entry'].message_type = _PLAYLISTENTRY
_MUTATEPLAYLISTENTRYREQUEST.fields_by_name['update_playlist_entry'].message_type = _PLAYLISTENTRY
_MUTATEPLAYLISTENTRYREQUEST.fields_by_name['delete_playlist_entry'].message_type = _PLAYLISTENTRY
_BATCHMUTATEPLAYLISTENTRIESREQUEST.fields_by_name['playlist_entry_mutation'].message_type = _MUTATEPLAYLISTENTRYREQUEST
_BATCHMUTATEPLAYLISTENTRIESRESPONSE.fields_by_name['response_code'].enum_type = _BATCHMUTATEPLAYLISTENTRIESRESPONSE_BATCHMUTATEPLAYLISTENTRIESRESPONSECODE
_BATCHMUTATEPLAYLISTENTRIESRESPONSE.fields_by_name['mutate_response'].message_type = _MUTATERESPONSE
_BATCHMUTATEPLAYLISTENTRIESRESPONSE_BATCHMUTATEPLAYLISTENTRIESRESPONSECODE.containing_type = _BATCHMUTATEPLAYLISTENTRIESRESPONSE;
_MAGICPLAYLISTSEED.fields_by_name['seed_type'].enum_type = _MAGICPLAYLISTSEED_SEEDTYPE
_MAGICPLAYLISTSEED_SEEDTYPE.containing_type = _MAGICPLAYLISTSEED;
_MAGICPLAYLISTREQUEST.fields_by_name['seed'].message_type = _MAGICPLAYLISTSEED
_MAGICPLAYLISTRESPONSE.fields_by_name['playlist'].message_type = _PLAYLIST
_MAGICPLAYLISTRESPONSE.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_ALBUM.fields_by_name['album_art'].message_type = _IMAGEREF
_ALBUMSORTORDER.fields_by_name['attribute'].enum_type = _ALBUMSORTORDER_ALBUMATTRIBUTE
_ALBUMSORTORDER_ALBUMATTRIBUTE.containing_type = _ALBUMSORTORDER;
_GETALBUMSREQUEST.fields_by_name['sort_order'].message_type = _ALBUMSORTORDER
_GETALBUMSRESPONSE.fields_by_name['album'].message_type = _ALBUM
_ARTIST.fields_by_name['album'].message_type = _ALBUM
_GETARTISTSREQUEST.fields_by_name['sort_order'].message_type = _ARTISTSORTORDER
_GETARTISTSRESPONSE.fields_by_name['artist'].message_type = _ARTIST
_MUSICGENRE.fields_by_name['album'].message_type = _ALBUM
_GETGENRESREQUEST.fields_by_name['sort_order'].message_type = _GENRESORTORDER
_GETGENRESRESPONSE.fields_by_name['genre'].message_type = _MUSICGENRE
_GETDYNAMICPLAYLISTENTRIESREQUEST.fields_by_name['playlist_entries_type'].enum_type = _GETDYNAMICPLAYLISTENTRIESREQUEST_DYNAMICPLAYLISTENTRIESTYPE
_GETDYNAMICPLAYLISTENTRIESREQUEST_DYNAMICPLAYLISTENTRIESTYPE.containing_type = _GETDYNAMICPLAYLISTENTRIESREQUEST;
_GETDYNAMICPLAYLISTENTRIESRESPONSE.fields_by_name['response_code'].enum_type = _GETDYNAMICPLAYLISTENTRIESRESPONSE_RESPONSECODE
_GETDYNAMICPLAYLISTENTRIESRESPONSE.fields_by_name['playlist_entry'].message_type = _PLAYLISTENTRY
_GETDYNAMICPLAYLISTENTRIESRESPONSE.fields_by_name['playlist_entries_type'].enum_type = _GETDYNAMICPLAYLISTENTRIESRESPONSE_DYNAMICPLAYLISTENTRIESTYPE
_GETDYNAMICPLAYLISTENTRIESRESPONSE_DYNAMICPLAYLISTENTRIESTYPE.containing_type = _GETDYNAMICPLAYLISTENTRIESRESPONSE;
_GETDYNAMICPLAYLISTENTRIESRESPONSE_RESPONSECODE.containing_type = _GETDYNAMICPLAYLISTENTRIESRESPONSE;
_TRACKTYPEAGGREGATE.fields_by_name['track_type_value'].enum_type = _TRACKTYPEAGGREGATE_TRACKTYPE
_TRACKTYPEAGGREGATE_TRACKTYPE.containing_type = _TRACKTYPEAGGREGATE;
_GETAGGREGATIONSBYTRACKTYPERESPONSE.fields_by_name['track_type_aggregate'].message_type = _TRACKTYPEAGGREGATE
_AVAILABILITYSTATUSAGGREGATE.fields_by_name['availability_status'].enum_type = _AVAILABILITYSTATUSAGGREGATE_AVAILABILITYSTATUS
_AVAILABILITYSTATUSAGGREGATE_AVAILABILITYSTATUS.containing_type = _AVAILABILITYSTATUSAGGREGATE;
_GETAGGREGATIONSBYAVAILABILITYSTATUSRESPONSE.fields_by_name['availability_status_aggregate'].message_type = _AVAILABILITYSTATUSAGGREGATE
_ADDPROMOTRACKSRESPONSE.fields_by_name['track'].message_type = _TRACK
_PLAYLISTAGGREGATE.fields_by_name['album_art'].message_type = _IMAGEREF
_GETPLAYLISTAGGREGATIONSRESPONSE.fields_by_name['playlist_aggregate'].message_type = _PLAYLISTAGGREGATE
_REMOTECONTROLCOMMANDRESPONSE.fields_by_name['response_code'].enum_type = _REMOTECONTROLCOMMANDRESPONSE_RESPONSECODE
_REMOTECONTROLCOMMANDRESPONSE_RESPONSECODE.containing_type = _REMOTECONTROLCOMMANDRESPONSE;
DESCRIPTOR.message_types_by_name['AudioRef'] = _AUDIOREF
DESCRIPTOR.message_types_by_name['ImageRef'] = _IMAGEREF
DESCRIPTOR.message_types_by_name['UploadedUitsId3Tag'] = _UPLOADEDUITSID3TAG
DESCRIPTOR.message_types_by_name['Track'] = _TRACK
DESCRIPTOR.message_types_by_name['Tracks'] = _TRACKS
DESCRIPTOR.message_types_by_name['Playlist'] = _PLAYLIST
DESCRIPTOR.message_types_by_name['PlaylistEntry'] = _PLAYLISTENTRY
DESCRIPTOR.message_types_by_name['TrackSearchRestriction'] = _TRACKSEARCHRESTRICTION
DESCRIPTOR.message_types_by_name['TrackSearchRestrictionSet'] = _TRACKSEARCHRESTRICTIONSET
DESCRIPTOR.message_types_by_name['TrackSortOrder'] = _TRACKSORTORDER
DESCRIPTOR.message_types_by_name['GetTracksRequest'] = _GETTRACKSREQUEST
DESCRIPTOR.message_types_by_name['GetTracksResponse'] = _GETTRACKSRESPONSE
DESCRIPTOR.message_types_by_name['GetPlaylistEntriesRequest'] = _GETPLAYLISTENTRIESREQUEST
DESCRIPTOR.message_types_by_name['GetPlaylistEntriesResponse'] = _GETPLAYLISTENTRIESRESPONSE
DESCRIPTOR.message_types_by_name['PlaylistSortOrder'] = _PLAYLISTSORTORDER
DESCRIPTOR.message_types_by_name['GetPlaylistsRequest'] = _GETPLAYLISTSREQUEST
DESCRIPTOR.message_types_by_name['GetPlaylistsResponse'] = _GETPLAYLISTSRESPONSE
DESCRIPTOR.message_types_by_name['LookupTrackRequest'] = _LOOKUPTRACKREQUEST
DESCRIPTOR.message_types_by_name['LookupPlaylistEntryRequest'] = _LOOKUPPLAYLISTENTRYREQUEST
DESCRIPTOR.message_types_by_name['LookupPlaylistRequest'] = _LOOKUPPLAYLISTREQUEST
DESCRIPTOR.message_types_by_name['BatchLookupRequest'] = _BATCHLOOKUPREQUEST
DESCRIPTOR.message_types_by_name['BatchLookupResponse'] = _BATCHLOOKUPRESPONSE
DESCRIPTOR.message_types_by_name['MutateTrackRequest'] = _MUTATETRACKREQUEST
DESCRIPTOR.message_types_by_name['MutateResponse'] = _MUTATERESPONSE
DESCRIPTOR.message_types_by_name['BatchMutateTracksRequest'] = _BATCHMUTATETRACKSREQUEST
DESCRIPTOR.message_types_by_name['BatchMutateTracksResponse'] = _BATCHMUTATETRACKSRESPONSE
DESCRIPTOR.message_types_by_name['MutatePlaylistRequest'] = _MUTATEPLAYLISTREQUEST
DESCRIPTOR.message_types_by_name['BatchMutatePlaylistsRequest'] = _BATCHMUTATEPLAYLISTSREQUEST
DESCRIPTOR.message_types_by_name['BatchMutatePlaylistsResponse'] = _BATCHMUTATEPLAYLISTSRESPONSE
DESCRIPTOR.message_types_by_name['MutatePlaylistEntryRequest'] = _MUTATEPLAYLISTENTRYREQUEST
DESCRIPTOR.message_types_by_name['BatchMutatePlaylistEntriesRequest'] = _BATCHMUTATEPLAYLISTENTRIESREQUEST
DESCRIPTOR.message_types_by_name['BatchMutatePlaylistEntriesResponse'] = _BATCHMUTATEPLAYLISTENTRIESRESPONSE
DESCRIPTOR.message_types_by_name['MagicPlaylistSeed'] = _MAGICPLAYLISTSEED
DESCRIPTOR.message_types_by_name['MagicPlaylistRequest'] = _MAGICPLAYLISTREQUEST
DESCRIPTOR.message_types_by_name['MagicPlaylistResponse'] = _MAGICPLAYLISTRESPONSE
DESCRIPTOR.message_types_by_name['FlushLockerRequest'] = _FLUSHLOCKERREQUEST
DESCRIPTOR.message_types_by_name['FlushLockerResponse'] = _FLUSHLOCKERRESPONSE
DESCRIPTOR.message_types_by_name['LockerNotification'] = _LOCKERNOTIFICATION
DESCRIPTOR.message_types_by_name['Album'] = _ALBUM
DESCRIPTOR.message_types_by_name['AlbumSortOrder'] = _ALBUMSORTORDER
DESCRIPTOR.message_types_by_name['GetAlbumsRequest'] = _GETALBUMSREQUEST
DESCRIPTOR.message_types_by_name['GetAlbumsResponse'] = _GETALBUMSRESPONSE
DESCRIPTOR.message_types_by_name['Artist'] = _ARTIST
DESCRIPTOR.message_types_by_name['ArtistSortOrder'] = _ARTISTSORTORDER
DESCRIPTOR.message_types_by_name['GetArtistsRequest'] = _GETARTISTSREQUEST
DESCRIPTOR.message_types_by_name['GetArtistsResponse'] = _GETARTISTSRESPONSE
DESCRIPTOR.message_types_by_name['MusicGenre'] = _MUSICGENRE
DESCRIPTOR.message_types_by_name['GenreSortOrder'] = _GENRESORTORDER
DESCRIPTOR.message_types_by_name['GetGenresRequest'] = _GETGENRESREQUEST
DESCRIPTOR.message_types_by_name['GetGenresResponse'] = _GETGENRESRESPONSE
DESCRIPTOR.message_types_by_name['GetDynamicPlaylistEntriesRequest'] = _GETDYNAMICPLAYLISTENTRIESREQUEST
DESCRIPTOR.message_types_by_name['GetDynamicPlaylistEntriesResponse'] = _GETDYNAMICPLAYLISTENTRIESRESPONSE
DESCRIPTOR.message_types_by_name['GetAggregationsByTrackTypeRequest'] = _GETAGGREGATIONSBYTRACKTYPEREQUEST
DESCRIPTOR.message_types_by_name['TrackTypeAggregate'] = _TRACKTYPEAGGREGATE
DESCRIPTOR.message_types_by_name['GetAggregationsByTrackTypeResponse'] = _GETAGGREGATIONSBYTRACKTYPERESPONSE
DESCRIPTOR.message_types_by_name['GetAggregationsByAvailabilityStatusRequest'] = _GETAGGREGATIONSBYAVAILABILITYSTATUSREQUEST
DESCRIPTOR.message_types_by_name['AvailabilityStatusAggregate'] = _AVAILABILITYSTATUSAGGREGATE
DESCRIPTOR.message_types_by_name['GetAggregationsByAvailabilityStatusResponse'] = _GETAGGREGATIONSBYAVAILABILITYSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['AddPromoTracksRequest'] = _ADDPROMOTRACKSREQUEST
DESCRIPTOR.message_types_by_name['AddPromoTracksResponse'] = _ADDPROMOTRACKSRESPONSE
DESCRIPTOR.message_types_by_name['GetPlaylistAggregationsRequest'] = _GETPLAYLISTAGGREGATIONSREQUEST
DESCRIPTOR.message_types_by_name['PlaylistAggregate'] = _PLAYLISTAGGREGATE
DESCRIPTOR.message_types_by_name['GetPlaylistAggregationsResponse'] = _GETPLAYLISTAGGREGATIONSRESPONSE
DESCRIPTOR.message_types_by_name['RemoteControlCommandRequest'] = _REMOTECONTROLCOMMANDREQUEST
DESCRIPTOR.message_types_by_name['RemoteControlCommandResponse'] = _REMOTECONTROLCOMMANDRESPONSE
class AudioRef(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _AUDIOREF
# @@protoc_insertion_point(class_scope:AudioRef)
class ImageRef(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _IMAGEREF
# @@protoc_insertion_point(class_scope:ImageRef)
class UploadedUitsId3Tag(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _UPLOADEDUITSID3TAG
# @@protoc_insertion_point(class_scope:UploadedUitsId3Tag)
class Track(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACK
# @@protoc_insertion_point(class_scope:Track)
class Tracks(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKS
# @@protoc_insertion_point(class_scope:Tracks)
class Playlist(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLAYLIST
# @@protoc_insertion_point(class_scope:Playlist)
class PlaylistEntry(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLAYLISTENTRY
# @@protoc_insertion_point(class_scope:PlaylistEntry)
class TrackSearchRestriction(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKSEARCHRESTRICTION
# @@protoc_insertion_point(class_scope:TrackSearchRestriction)
class TrackSearchRestrictionSet(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKSEARCHRESTRICTIONSET
# @@protoc_insertion_point(class_scope:TrackSearchRestrictionSet)
class TrackSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKSORTORDER
# @@protoc_insertion_point(class_scope:TrackSortOrder)
class GetTracksRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETTRACKSREQUEST
# @@protoc_insertion_point(class_scope:GetTracksRequest)
class GetTracksResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETTRACKSRESPONSE
# @@protoc_insertion_point(class_scope:GetTracksResponse)
class GetPlaylistEntriesRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTENTRIESREQUEST
# @@protoc_insertion_point(class_scope:GetPlaylistEntriesRequest)
class GetPlaylistEntriesResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTENTRIESRESPONSE
# @@protoc_insertion_point(class_scope:GetPlaylistEntriesResponse)
class PlaylistSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLAYLISTSORTORDER
# @@protoc_insertion_point(class_scope:PlaylistSortOrder)
class GetPlaylistsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTSREQUEST
# @@protoc_insertion_point(class_scope:GetPlaylistsRequest)
class GetPlaylistsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTSRESPONSE
# @@protoc_insertion_point(class_scope:GetPlaylistsResponse)
class LookupTrackRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOOKUPTRACKREQUEST
# @@protoc_insertion_point(class_scope:LookupTrackRequest)
class LookupPlaylistEntryRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOOKUPPLAYLISTENTRYREQUEST
# @@protoc_insertion_point(class_scope:LookupPlaylistEntryRequest)
class LookupPlaylistRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOOKUPPLAYLISTREQUEST
# @@protoc_insertion_point(class_scope:LookupPlaylistRequest)
class BatchLookupRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHLOOKUPREQUEST
# @@protoc_insertion_point(class_scope:BatchLookupRequest)
class BatchLookupResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHLOOKUPRESPONSE
# @@protoc_insertion_point(class_scope:BatchLookupResponse)
class MutateTrackRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUTATETRACKREQUEST
# @@protoc_insertion_point(class_scope:MutateTrackRequest)
class MutateResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUTATERESPONSE
# @@protoc_insertion_point(class_scope:MutateResponse)
class BatchMutateTracksRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATETRACKSREQUEST
# @@protoc_insertion_point(class_scope:BatchMutateTracksRequest)
class BatchMutateTracksResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATETRACKSRESPONSE
# @@protoc_insertion_point(class_scope:BatchMutateTracksResponse)
class MutatePlaylistRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUTATEPLAYLISTREQUEST
# @@protoc_insertion_point(class_scope:MutatePlaylistRequest)
class BatchMutatePlaylistsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATEPLAYLISTSREQUEST
# @@protoc_insertion_point(class_scope:BatchMutatePlaylistsRequest)
class BatchMutatePlaylistsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATEPLAYLISTSRESPONSE
# @@protoc_insertion_point(class_scope:BatchMutatePlaylistsResponse)
class MutatePlaylistEntryRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUTATEPLAYLISTENTRYREQUEST
# @@protoc_insertion_point(class_scope:MutatePlaylistEntryRequest)
class BatchMutatePlaylistEntriesRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATEPLAYLISTENTRIESREQUEST
# @@protoc_insertion_point(class_scope:BatchMutatePlaylistEntriesRequest)
class BatchMutatePlaylistEntriesResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _BATCHMUTATEPLAYLISTENTRIESRESPONSE
# @@protoc_insertion_point(class_scope:BatchMutatePlaylistEntriesResponse)
class MagicPlaylistSeed(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MAGICPLAYLISTSEED
# @@protoc_insertion_point(class_scope:MagicPlaylistSeed)
class MagicPlaylistRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MAGICPLAYLISTREQUEST
# @@protoc_insertion_point(class_scope:MagicPlaylistRequest)
class MagicPlaylistResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MAGICPLAYLISTRESPONSE
# @@protoc_insertion_point(class_scope:MagicPlaylistResponse)
class FlushLockerRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FLUSHLOCKERREQUEST
# @@protoc_insertion_point(class_scope:FlushLockerRequest)
class FlushLockerResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FLUSHLOCKERRESPONSE
# @@protoc_insertion_point(class_scope:FlushLockerResponse)
class LockerNotification(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOCKERNOTIFICATION
# @@protoc_insertion_point(class_scope:LockerNotification)
class Album(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ALBUM
# @@protoc_insertion_point(class_scope:Album)
class AlbumSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ALBUMSORTORDER
# @@protoc_insertion_point(class_scope:AlbumSortOrder)
class GetAlbumsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETALBUMSREQUEST
# @@protoc_insertion_point(class_scope:GetAlbumsRequest)
class GetAlbumsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETALBUMSRESPONSE
# @@protoc_insertion_point(class_scope:GetAlbumsResponse)
class Artist(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ARTIST
# @@protoc_insertion_point(class_scope:Artist)
class ArtistSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ARTISTSORTORDER
# @@protoc_insertion_point(class_scope:ArtistSortOrder)
class GetArtistsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETARTISTSREQUEST
# @@protoc_insertion_point(class_scope:GetArtistsRequest)
class GetArtistsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETARTISTSRESPONSE
# @@protoc_insertion_point(class_scope:GetArtistsResponse)
class MusicGenre(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MUSICGENRE
# @@protoc_insertion_point(class_scope:MusicGenre)
class GenreSortOrder(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GENRESORTORDER
# @@protoc_insertion_point(class_scope:GenreSortOrder)
class GetGenresRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETGENRESREQUEST
# @@protoc_insertion_point(class_scope:GetGenresRequest)
class GetGenresResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETGENRESRESPONSE
# @@protoc_insertion_point(class_scope:GetGenresResponse)
class GetDynamicPlaylistEntriesRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETDYNAMICPLAYLISTENTRIESREQUEST
# @@protoc_insertion_point(class_scope:GetDynamicPlaylistEntriesRequest)
class GetDynamicPlaylistEntriesResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETDYNAMICPLAYLISTENTRIESRESPONSE
# @@protoc_insertion_point(class_scope:GetDynamicPlaylistEntriesResponse)
class GetAggregationsByTrackTypeRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETAGGREGATIONSBYTRACKTYPEREQUEST
# @@protoc_insertion_point(class_scope:GetAggregationsByTrackTypeRequest)
class TrackTypeAggregate(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TRACKTYPEAGGREGATE
# @@protoc_insertion_point(class_scope:TrackTypeAggregate)
class GetAggregationsByTrackTypeResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETAGGREGATIONSBYTRACKTYPERESPONSE
# @@protoc_insertion_point(class_scope:GetAggregationsByTrackTypeResponse)
class GetAggregationsByAvailabilityStatusRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETAGGREGATIONSBYAVAILABILITYSTATUSREQUEST
# @@protoc_insertion_point(class_scope:GetAggregationsByAvailabilityStatusRequest)
class AvailabilityStatusAggregate(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _AVAILABILITYSTATUSAGGREGATE
# @@protoc_insertion_point(class_scope:AvailabilityStatusAggregate)
class GetAggregationsByAvailabilityStatusResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETAGGREGATIONSBYAVAILABILITYSTATUSRESPONSE
# @@protoc_insertion_point(class_scope:GetAggregationsByAvailabilityStatusResponse)
class AddPromoTracksRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ADDPROMOTRACKSREQUEST
# @@protoc_insertion_point(class_scope:AddPromoTracksRequest)
class AddPromoTracksResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ADDPROMOTRACKSRESPONSE
# @@protoc_insertion_point(class_scope:AddPromoTracksResponse)
class GetPlaylistAggregationsRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTAGGREGATIONSREQUEST
# @@protoc_insertion_point(class_scope:GetPlaylistAggregationsRequest)
class PlaylistAggregate(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLAYLISTAGGREGATE
# @@protoc_insertion_point(class_scope:PlaylistAggregate)
class GetPlaylistAggregationsResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETPLAYLISTAGGREGATIONSRESPONSE
# @@protoc_insertion_point(class_scope:GetPlaylistAggregationsResponse)
class RemoteControlCommandRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOTECONTROLCOMMANDREQUEST
# @@protoc_insertion_point(class_scope:RemoteControlCommandRequest)
class RemoteControlCommandResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOTECONTROLCOMMANDRESPONSE
# @@protoc_insertion_point(class_scope:RemoteControlCommandResponse)
# @@protoc_insertion_point(module_scope)
| true | true |
f71acc6d33db887796f3bf9b80851bc5ea533057 | 1,180 | py | Python | deeplmodel/source/wer.py | Haftom2323/AMH-STT | b0292a6c704b3b94eff7a536a4da04f905cb42fb | [
"MIT"
] | 1 | 2022-03-13T19:49:39.000Z | 2022-03-13T19:49:39.000Z | deeplmodel/source/wer.py | eyerus21/AMH-STT | b0292a6c704b3b94eff7a536a4da04f905cb42fb | [
"MIT"
] | null | null | null | deeplmodel/source/wer.py | eyerus21/AMH-STT | b0292a6c704b3b94eff7a536a4da04f905cb42fb | [
"MIT"
] | 11 | 2021-08-02T19:29:47.000Z | 2022-03-13T17:25:17.000Z | def wer(r, h):
"""
Calculation of WER with Levenshtein distance.
Works only for iterables up to 254 elements (uint8).
O(nm) time ans space complexity.
Parameters
----------
r : list
h : list
Returns
-------
int
Examples
--------
>>> wer("who is there".split(), "is there".split())
1
>>> wer("who is there".split(), "".split())
3
>>> wer("".split(), "who is there".split())
3
"""
# initialisation
import numpy
d = numpy.zeros((len(r)+1)*(len(h)+1), dtype=numpy.uint8)
d = d.reshape((len(r)+1, len(h)+1))
for i in range(len(r)+1):
for j in range(len(h)+1):
if i == 0:
d[0][j] = j
elif j == 0:
d[i][0] = i
# computation
for i in range(1, len(r)+1):
for j in range(1, len(h)+1):
if r[i-1] == h[j-1]:
d[i][j] = d[i-1][j-1]
else:
substitution = d[i-1][j-1] + 1
insertion = d[i][j-1] + 1
deletion = d[i-1][j] + 1
d[i][j] = min(substitution, insertion, deletion)
return d[len(r)][len(h)] | 24.583333 | 64 | 0.440678 | def wer(r, h):
import numpy
d = numpy.zeros((len(r)+1)*(len(h)+1), dtype=numpy.uint8)
d = d.reshape((len(r)+1, len(h)+1))
for i in range(len(r)+1):
for j in range(len(h)+1):
if i == 0:
d[0][j] = j
elif j == 0:
d[i][0] = i
for i in range(1, len(r)+1):
for j in range(1, len(h)+1):
if r[i-1] == h[j-1]:
d[i][j] = d[i-1][j-1]
else:
substitution = d[i-1][j-1] + 1
insertion = d[i][j-1] + 1
deletion = d[i-1][j] + 1
d[i][j] = min(substitution, insertion, deletion)
return d[len(r)][len(h)] | true | true |
f71acdd0b906e1300a3decc62a833ed0cf01a8fa | 7,182 | py | Python | club_crm/club_crm/report/fitness_commission_summary/fitness_commission_summary.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | club_crm/club_crm/report/fitness_commission_summary/fitness_commission_summary.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | club_crm/club_crm/report/fitness_commission_summary/fitness_commission_summary.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | # Copyright (c) 2013, Blue Lynx and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import math
from frappe.utils import getdate, get_time, flt
from datetime import datetime, timedelta, date, time
import calendar
def execute(filters=None):
columns, data = [], []
if filters:
columns = get_column()
data = get_data(filters)
return columns, data
def get_column():
columns = [
{
"label": "Staff Name",
"fieldname": "staff_name",
"fieldtype": "Data",
"width": 120
},
{
"label": "PT Count (Hours)",
"fieldname": "pt_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "GX Count (Hours)",
"fieldname": "gx_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "Others (Hours)",
"fieldname": "ot_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "PT Commissions",
"fieldname": "pt_commission",
"fieldtype": "Currency",
"width": 150
},
{
"label": "GX Commissions",
"fieldname": "gc_commission",
"fieldtype": "Currency",
"width": 150
},
{
"label": "Other Commissions",
"fieldname": "other_commission",
"fieldtype": "Currency",
"width": 150,
"default": 0.0
},
{
"label": "Total Commission",
"fieldname": "total_commission",
"fieldtype": "Currency",
"width": 150
}
]
return columns
def get_data(filters):
data = []
final_data = []
year = int(filters['year'])
if 'date_range' in filters:
if filters['date_range'] == "Month":
month = filters['month']
month_number = int(datetime.strptime(month, '%B').month)
last_day = calendar.monthrange(year, month_number)[1]
start_date = datetime(year, month_number, 1)
start = start_date.date()
end_date = datetime(year, month_number, last_day)
end = end_date.date()
elif filters['date_range'] == "Custom Range":
start = getdate(filters['from_date'])
end = getdate( filters['to_date'])
if 'service_staff' in filters:
staff_list = frappe.get_all('Service Staff', filters={'name': filters['service_staff']})
else:
staff_list = frappe.db.get_list('Service Staff', filters=[['fitness_check', '=', 1]], fields=['name'])
settings = frappe.get_doc('Fitness Training Settings')
if staff_list:
for staff in staff_list:
pt_count = 0.0
ot_count = 0.0
other_commission = 0.0
service_staff = frappe.get_doc('Service Staff', staff.name)
if service_staff.fitness_service_assignment:
for services in service_staff.fitness_service_assignment:
if services.commission_applicable:
appointments_list = frappe.db.get_list('Fitness Training Appointment', filters=[['fitness_service', '=', services.fitness_package], ['appointment_date', 'between', [start, end]], ['payment_status', '=', 'Paid'], ['service_staff', '=', staff.name], ['appointment_status', 'in', {'Completed', 'No Show'}]], fields=['name', 'fitness_service'])
if services.commission_type == "Standard":
if appointments_list:
for appointments in appointments_list:
pt_service = frappe.get_doc('Fitness Services', appointments.fitness_service)
if pt_service.session_for == "Single":
pt_count += settings.single_session
elif pt_service.session_for == "Couple":
pt_count += settings.couple_session
elif services.commission_type == "Custom":
if appointments_list:
for appointments in appointments_list:
other_commission += services.commission_amount
ot_count += 1
staff['staff_name']= staff.name
staff['pt_count'] = pt_count
staff['ot_count'] = ot_count
staff['other_commission'] = other_commission
gc = []
gc_list = frappe.db.get_list('Group Class', filters=[['class_date', 'between', [start, end]], ['trainer_name', '=', staff.name], ['class_status', '=', 'Completed']], fields=['count(name) as gx_count'], group_by='trainer_name')
if gc_list:
for group_class in gc_list:
group_class_attendee = frappe.get_all('Group Class Attendees', filters={'group_class': group_class.name, 'attendee_status': 'Complete' })
if group_class_attendee:
if len(group_class_attendee) >= 3:
gc.append(group_class)
staff['gx_count'] = len(gc)
data.append(staff)
for row in data:
row['gc_commission'] = float(row['gx_count']) * float(settings.group_class_rate)
pt = calculate_pt(row['pt_count'], row['gx_count'])
row['pt_commission'] = pt
row['total_commission'] = row['gc_commission'] + row['pt_commission'] + row['other_commission']
final_data.append(row)
return final_data
def month():
year = 2021
months = 'July'
month_number = datetime.strptime(months, '%B').month
last_day = calendar.monthrange(year, month_number)[1]
start_date = datetime(year, month_number, 1)
start = start_date.date()
end_date = datetime(year, month_number, last_day)
end = end_date.date()
staff_list = frappe.db.get_list('Service Staff', filters=[['fitness_check', '=', 1]], fields=['name'])
for staff in staff_list:
gc_list = frappe.db.get_list('Group Class', filters=[['class_date', 'between', [start, end]], ['trainer_name', '=', 'Jatinder'], ['class_status', '=', 'Completed']], fields=['count(name) as gc_count'], group_by='trainer_name')
for gc in gc_list:
return type(gc.gc_count)
@frappe.whitelist()
def calculate_pt(pt_count, gx_count):
total_count = pt_count + gx_count
scale = {(0, 30): 40, (30, 60): 60, (60, 90): 80, (90, 120): 100, (120, 150): 120, (150, math.inf): 140}
hours_worked = total_count
decimal_rate = next(rate for (lower, upper), rate in scale.items() if lower <= hours_worked and upper >= hours_worked)
decimal_end = hours_worked - int(hours_worked)
end_pay = decimal_end * decimal_rate
# Use an integer for ease of calculation
hours_worked = int(hours_worked)
hours_paid_for = 0
# Beginning total pay is just the decimal "ending"
total_pay = end_pay
while hours_paid_for < hours_worked:
# Find the rate for the current bucket of hours
rate_filter = (rate for (lower, upper), rate in scale.items() if lower <= hours_paid_for and hours_paid_for < upper)
current_level = next(rate_filter)
total_pay += current_level
hours_paid_for += 1
total_session = total_pay
scale_1 = {(0, 30): 40, (30, 60): 60, (60, 90): 80, (90, 120): 100, (120, 150): 120, (150, math.inf): 140}
hours_worked_1 = gx_count
decimal_rate_1 = next(rate for (lower, upper), rate in scale_1.items() if lower <= hours_worked_1 and upper >= hours_worked_1)
decimal_end_1 = hours_worked_1 - int(hours_worked_1)
end_pay_1 = decimal_end_1 * decimal_rate_1
# Use an integer for ease of calculation
hours_worked_1 = int(hours_worked_1)
hours_paid_for_1 = 0
# Beginning total pay is just the decimal "ending"
total_pay_1 = end_pay_1
while hours_paid_for_1 < hours_worked_1:
# Find the rate for the current bucket of hours
rate_filter = (rate for (lower, upper), rate in scale_1.items() if lower <= hours_paid_for_1 and hours_paid_for_1 < upper)
current_level = next(rate_filter)
total_pay_1 += current_level
hours_paid_for_1 += 1
total_gc = total_pay_1
commission_from_pt = total_session - total_gc
return commission_from_pt | 31.778761 | 346 | 0.690058 |
from __future__ import unicode_literals
import frappe
from frappe import _
import math
from frappe.utils import getdate, get_time, flt
from datetime import datetime, timedelta, date, time
import calendar
def execute(filters=None):
columns, data = [], []
if filters:
columns = get_column()
data = get_data(filters)
return columns, data
def get_column():
columns = [
{
"label": "Staff Name",
"fieldname": "staff_name",
"fieldtype": "Data",
"width": 120
},
{
"label": "PT Count (Hours)",
"fieldname": "pt_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "GX Count (Hours)",
"fieldname": "gx_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "Others (Hours)",
"fieldname": "ot_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "PT Commissions",
"fieldname": "pt_commission",
"fieldtype": "Currency",
"width": 150
},
{
"label": "GX Commissions",
"fieldname": "gc_commission",
"fieldtype": "Currency",
"width": 150
},
{
"label": "Other Commissions",
"fieldname": "other_commission",
"fieldtype": "Currency",
"width": 150,
"default": 0.0
},
{
"label": "Total Commission",
"fieldname": "total_commission",
"fieldtype": "Currency",
"width": 150
}
]
return columns
def get_data(filters):
data = []
final_data = []
year = int(filters['year'])
if 'date_range' in filters:
if filters['date_range'] == "Month":
month = filters['month']
month_number = int(datetime.strptime(month, '%B').month)
last_day = calendar.monthrange(year, month_number)[1]
start_date = datetime(year, month_number, 1)
start = start_date.date()
end_date = datetime(year, month_number, last_day)
end = end_date.date()
elif filters['date_range'] == "Custom Range":
start = getdate(filters['from_date'])
end = getdate( filters['to_date'])
if 'service_staff' in filters:
staff_list = frappe.get_all('Service Staff', filters={'name': filters['service_staff']})
else:
staff_list = frappe.db.get_list('Service Staff', filters=[['fitness_check', '=', 1]], fields=['name'])
settings = frappe.get_doc('Fitness Training Settings')
if staff_list:
for staff in staff_list:
pt_count = 0.0
ot_count = 0.0
other_commission = 0.0
service_staff = frappe.get_doc('Service Staff', staff.name)
if service_staff.fitness_service_assignment:
for services in service_staff.fitness_service_assignment:
if services.commission_applicable:
appointments_list = frappe.db.get_list('Fitness Training Appointment', filters=[['fitness_service', '=', services.fitness_package], ['appointment_date', 'between', [start, end]], ['payment_status', '=', 'Paid'], ['service_staff', '=', staff.name], ['appointment_status', 'in', {'Completed', 'No Show'}]], fields=['name', 'fitness_service'])
if services.commission_type == "Standard":
if appointments_list:
for appointments in appointments_list:
pt_service = frappe.get_doc('Fitness Services', appointments.fitness_service)
if pt_service.session_for == "Single":
pt_count += settings.single_session
elif pt_service.session_for == "Couple":
pt_count += settings.couple_session
elif services.commission_type == "Custom":
if appointments_list:
for appointments in appointments_list:
other_commission += services.commission_amount
ot_count += 1
staff['staff_name']= staff.name
staff['pt_count'] = pt_count
staff['ot_count'] = ot_count
staff['other_commission'] = other_commission
gc = []
gc_list = frappe.db.get_list('Group Class', filters=[['class_date', 'between', [start, end]], ['trainer_name', '=', staff.name], ['class_status', '=', 'Completed']], fields=['count(name) as gx_count'], group_by='trainer_name')
if gc_list:
for group_class in gc_list:
group_class_attendee = frappe.get_all('Group Class Attendees', filters={'group_class': group_class.name, 'attendee_status': 'Complete' })
if group_class_attendee:
if len(group_class_attendee) >= 3:
gc.append(group_class)
staff['gx_count'] = len(gc)
data.append(staff)
for row in data:
row['gc_commission'] = float(row['gx_count']) * float(settings.group_class_rate)
pt = calculate_pt(row['pt_count'], row['gx_count'])
row['pt_commission'] = pt
row['total_commission'] = row['gc_commission'] + row['pt_commission'] + row['other_commission']
final_data.append(row)
return final_data
def month():
year = 2021
months = 'July'
month_number = datetime.strptime(months, '%B').month
last_day = calendar.monthrange(year, month_number)[1]
start_date = datetime(year, month_number, 1)
start = start_date.date()
end_date = datetime(year, month_number, last_day)
end = end_date.date()
staff_list = frappe.db.get_list('Service Staff', filters=[['fitness_check', '=', 1]], fields=['name'])
for staff in staff_list:
gc_list = frappe.db.get_list('Group Class', filters=[['class_date', 'between', [start, end]], ['trainer_name', '=', 'Jatinder'], ['class_status', '=', 'Completed']], fields=['count(name) as gc_count'], group_by='trainer_name')
for gc in gc_list:
return type(gc.gc_count)
@frappe.whitelist()
def calculate_pt(pt_count, gx_count):
total_count = pt_count + gx_count
scale = {(0, 30): 40, (30, 60): 60, (60, 90): 80, (90, 120): 100, (120, 150): 120, (150, math.inf): 140}
hours_worked = total_count
decimal_rate = next(rate for (lower, upper), rate in scale.items() if lower <= hours_worked and upper >= hours_worked)
decimal_end = hours_worked - int(hours_worked)
end_pay = decimal_end * decimal_rate
hours_worked = int(hours_worked)
hours_paid_for = 0
total_pay = end_pay
while hours_paid_for < hours_worked:
rate_filter = (rate for (lower, upper), rate in scale.items() if lower <= hours_paid_for and hours_paid_for < upper)
current_level = next(rate_filter)
total_pay += current_level
hours_paid_for += 1
total_session = total_pay
scale_1 = {(0, 30): 40, (30, 60): 60, (60, 90): 80, (90, 120): 100, (120, 150): 120, (150, math.inf): 140}
hours_worked_1 = gx_count
decimal_rate_1 = next(rate for (lower, upper), rate in scale_1.items() if lower <= hours_worked_1 and upper >= hours_worked_1)
decimal_end_1 = hours_worked_1 - int(hours_worked_1)
end_pay_1 = decimal_end_1 * decimal_rate_1
hours_worked_1 = int(hours_worked_1)
hours_paid_for_1 = 0
total_pay_1 = end_pay_1
while hours_paid_for_1 < hours_worked_1:
rate_filter = (rate for (lower, upper), rate in scale_1.items() if lower <= hours_paid_for_1 and hours_paid_for_1 < upper)
current_level = next(rate_filter)
total_pay_1 += current_level
hours_paid_for_1 += 1
total_gc = total_pay_1
commission_from_pt = total_session - total_gc
return commission_from_pt | true | true |
f71ace2a9df90effa5053c4c417c48be91c319fc | 1,241 | py | Python | setup.py | larsrollik/serial_weighing_scale | 312218cbbb6b84b011d83980b3df6e0e99b36e50 | [
"BSD-3-Clause"
] | null | null | null | setup.py | larsrollik/serial_weighing_scale | 312218cbbb6b84b011d83980b3df6e0e99b36e50 | [
"BSD-3-Clause"
] | null | null | null | setup.py | larsrollik/serial_weighing_scale | 312218cbbb6b84b011d83980b3df6e0e99b36e50 | [
"BSD-3-Clause"
] | null | null | null | from os import path
from setuptools import find_packages
from setuptools import setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md")) as f:
long_description = f.read()
with open(path.join(this_directory, "LICENSE")) as f:
license_text = f.read()
setup(
name="serial_weighing_scale",
version="0.0.6",
description="serial_weighing_scale",
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">=3.6",
packages=find_packages(),
url="https://github.com/larsrollik/SerialWeighingScale",
author="Lars B. Rollik",
author_email="L.B.Rollik@protonmail.com",
license=license_text,
install_requires=[
"pyserial",
],
extras_require={
"dev": [
"black",
"pytest-cov",
"pytest",
"gitpython",
"coverage>=5.0.3",
"bump2version",
"pre-commit",
"flake8",
],
},
zip_safe=False,
include_package_data=True,
# entry_points={
# "console_scripts": [
# "console_script_name = module.path.to.function:function_name",
# ],
# },
)
| 24.82 | 76 | 0.611604 | from os import path
from setuptools import find_packages
from setuptools import setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md")) as f:
long_description = f.read()
with open(path.join(this_directory, "LICENSE")) as f:
license_text = f.read()
setup(
name="serial_weighing_scale",
version="0.0.6",
description="serial_weighing_scale",
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">=3.6",
packages=find_packages(),
url="https://github.com/larsrollik/SerialWeighingScale",
author="Lars B. Rollik",
author_email="L.B.Rollik@protonmail.com",
license=license_text,
install_requires=[
"pyserial",
],
extras_require={
"dev": [
"black",
"pytest-cov",
"pytest",
"gitpython",
"coverage>=5.0.3",
"bump2version",
"pre-commit",
"flake8",
],
},
zip_safe=False,
include_package_data=True,
)
| true | true |
f71ace2c76abb44e4261efab937f353dece55020 | 418 | py | Python | mrp_system/migrations/0037_billofmaterials_amount.py | mgeorge8/django_time | f75a442941b0ebbb6cc46a6d18e42b91695b7e57 | [
"MIT"
] | 1 | 2018-11-09T02:09:14.000Z | 2018-11-09T02:09:14.000Z | mrp_system/migrations/0037_billofmaterials_amount.py | mgeorge8/django_time | f75a442941b0ebbb6cc46a6d18e42b91695b7e57 | [
"MIT"
] | null | null | null | mrp_system/migrations/0037_billofmaterials_amount.py | mgeorge8/django_time | f75a442941b0ebbb6cc46a6d18e42b91695b7e57 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.2 on 2019-01-11 14:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mrp_system', '0036_auto_20190111_1357'),
]
operations = [
migrations.AddField(
model_name='billofmaterials',
name='amount',
field=models.IntegerField(blank=True, default=1, null=True),
),
]
| 22 | 72 | 0.614833 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mrp_system', '0036_auto_20190111_1357'),
]
operations = [
migrations.AddField(
model_name='billofmaterials',
name='amount',
field=models.IntegerField(blank=True, default=1, null=True),
),
]
| true | true |
f71acf1492f4b14baf2359d08fc5b2e0b4e5994f | 56,230 | py | Python | src/transformers/modeling_t5.py | kushalj001/transformers | 0538820737bd8fb9ba1eb3a772412c6bbe2433ab | [
"Apache-2.0"
] | 1 | 2020-10-30T09:05:17.000Z | 2020-10-30T09:05:17.000Z | src/transformers/modeling_t5.py | kushalj001/transformers | 0538820737bd8fb9ba1eb3a772412c6bbe2433ab | [
"Apache-2.0"
] | null | null | null | src/transformers/modeling_t5.py | kushalj001/transformers | 0538820737bd8fb9ba1eb3a772412c6bbe2433ab | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model. """
import copy
import math
import os
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from .configuration_t5 import T5Config
from .file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput
from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from .utils import logging
# Module-level logger, obtained through transformers' logging utilities.
logger = logging.get_logger(__name__)
# Names consumed by the documentation decorators used later in this file.
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
####################################################
# This list contains the shortcut names of the
# pretrained checkpoints provided with the models
####################################################
T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "t5-small",
    "t5-base",
    "t5-large",
    "t5-3b",
    "t5-11b",
    # See all T5 models at https://huggingface.co/models?filter=t5
]
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable of the TensorFlow checkpoint at ``tf_checkpoint_path``,
    maps its slash-separated scope name onto the attribute hierarchy of
    ``model``, and copies the numpy array into the matching parameter
    (transposing 2-D kernels, since TF stores them as the transpose of
    PyTorch's ``nn.Linear`` weight layout). Optimizer slot variables
    (Adam moments, global step, ``_slot_`` entries) are skipped.

    Args:
        model: the PyTorch T5 model whose parameters will be overwritten in place.
        config: model configuration (unused here, kept for the conversion-API signature).
        tf_checkpoint_path: path to the TensorFlow checkpoint.

    Returns:
        The same ``model`` instance, with weights loaded.

    Raises:
        ImportError: if TensorFlow is not installed.
        AssertionError: if a checkpoint array's shape does not match the
            target parameter's shape.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        tf_weights[name] = array
    for txt_name in names:
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            tf_weights.pop(txt_name, None)
            continue
        if "_slot_" in name[-1]:
            # Optimizer slot variables are not model weights.
            logger.info("Skipping {}".format("/".join(name)))
            tf_weights.pop(txt_name, None)
            continue
        pointer = model
        array = tf_weights[txt_name]
        # Descend into the model one scope component at a time; components
        # like "layer_0" are split into ("layer", "0") to index module lists.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] in ["kernel", "scale", "embedding"]:
                pointer = getattr(pointer, "weight")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` advances the inner scope
                    # loop only, so the remaining scope components are still
                    # applied to the unchanged pointer — confirm this is the
                    # intended fallback for unknown scope names.
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if scope_names[0] not in ["kernel", "scale", "embedding"]:
            pointer = getattr(pointer, "weight")
        if scope_names[0] != "embedding":
            # TF stores dense kernels as (in, out); PyTorch Linear expects (out, in).
            logger.info("Transposing numpy weight of shape {} for {}".format(array.shape, name))
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array.astype(np.float32))
        tf_weights.pop(txt_name, None)
    logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
    return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)
####################################################
class T5LayerNorm(nn.Module):
    """T5-style layer normalization: RMS scaling only.

    Unlike standard LayerNorm there is no mean subtraction and no bias term;
    the input is divided by the root-mean-square of its last dimension (plus
    ``eps`` for stability) and then rescaled by a learned per-feature weight.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Learned per-feature scale, initialized to 1.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        # Accumulate the mean of squares in float32 regardless of the input
        # dtype, so half-precision inputs do not lose accuracy here.
        mean_square = x.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
        normed = x / torch.sqrt(mean_square + self.variance_epsilon)
        # Cast back down when the module itself runs in half precision.
        if self.weight.dtype == torch.float16:
            normed = normed.to(torch.float16)
        return self.weight * normed
class T5DenseReluDense(nn.Module):
    """Position-wise feed-forward sublayer of T5: Linear -> ReLU -> Dropout -> Linear.

    Both projections are bias-free, matching the original Mesh TensorFlow
    parameterization of the T5 checkpoints.
    """

    def __init__(self, config):
        super().__init__()
        self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)  # expand: d_model -> d_ff
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)  # project back: d_ff -> d_model
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        # Expand, apply the non-linearity, regularize, then project back.
        return self.wo(self.dropout(F.relu(self.wi(hidden_states))))
class T5LayerFF(nn.Module):
    """Feed-forward block with T5's pre-norm residual wiring.

    The input is layer-normalized first, passed through the feed-forward
    sublayer, and the (dropped-out) result is added back onto the original
    unnormalized input as a residual connection.
    """

    def __init__(self, config):
        super().__init__()
        self.DenseReluDense = T5DenseReluDense(config)
        self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        # Pre-norm: normalize before the sublayer, then add the residual.
        ff_out = self.DenseReluDense(self.layer_norm(hidden_states))
        return hidden_states + self.dropout(ff_out)
class T5Attention(nn.Module):
    """Multi-head attention with T5's learned relative position bias.

    Serves both as self-attention (``kv is None`` in ``forward``) and as
    encoder-decoder cross-attention (``kv`` given). Only one layer per stack
    is constructed with ``has_relative_attention_bias=True``; other layers
    reuse the ``position_bias`` tensor produced by that layer.
    """

    def __init__(self, config: T5Config, has_relative_attention_bias=False, is_bidirectional=False):
        super().__init__()
        self.is_bidirectional = is_bidirectional
        self.is_decoder = config.is_decoder
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.d_model = config.d_model
        self.d_kv = config.d_kv
        self.n_heads = config.num_heads
        self.dropout = config.dropout_rate
        self.inner_dim = self.n_heads * self.d_kv
        # Mesh TensorFlow initialization to avoid scaling before softmax
        self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
        if self.has_relative_attention_bias:
            # One learned bias scalar per (relative-position bucket, head) pair.
            self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        # Remove the given attention heads from q/k/v/o and update the
        # head-count bookkeeping; heads already pruned are filtered out by
        # find_pruneable_heads_and_indices.
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.d_kv, self.pruned_heads)
        # Prune linear layers
        self.q = prune_linear_layer(self.q, index)
        self.k = prune_linear_layer(self.k, index)
        self.v = prune_linear_layer(self.v, index)
        self.o = prune_linear_layer(self.o, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.inner_dim = self.d_kv * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    @staticmethod
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on
        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer
        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        ret = 0
        n = -relative_position
        if bidirectional:
            # Half the buckets encode the sign; take absolute distance after.
            num_buckets //= 2
            ret += (n < 0).to(torch.long) * num_buckets  # mtf.to_int32(mtf.less(n, 0)) * num_buckets
            n = torch.abs(n)
        else:
            n = torch.max(n, torch.zeros_like(n))
        # now n is in the range [0, inf)
        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = n < max_exact
        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        val_if_large = max_exact + (
            torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
        ).to(torch.long)
        val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
        ret += torch.where(is_small, n, val_if_large)
        return ret

    def compute_bias(self, qlen, klen):
        """ Compute binned relative position bias """
        context_position = torch.arange(qlen, dtype=torch.long)[:, None]
        memory_position = torch.arange(klen, dtype=torch.long)[None, :]
        relative_position = memory_position - context_position  # shape (qlen, klen)
        rp_bucket = self._relative_position_bucket(
            relative_position,  # shape (qlen, klen)
            bidirectional=self.is_bidirectional,
            num_buckets=self.relative_attention_num_buckets,
        )
        rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
        values = self.relative_attention_bias(rp_bucket)  # shape (qlen, klen, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(0)  # shape (1, num_heads, qlen, klen)
        return values

    def forward(
        self,
        input,
        mask=None,
        kv=None,
        position_bias=None,
        past_key_value=None,
        head_mask=None,
        query_length=None,
        use_cache=False,
        output_attentions=False,
    ):
        """
        Self-attention (if kv is None) or attention over source sentence (provided by kv).

        Returns a tuple: (context, present_key_value_state[, attention weights]
        [, position_bias]); the optional entries are appended when
        ``output_attentions`` is set and when this layer owns the relative
        attention bias, respectively.
        """
        # Input is (bs, qlen, dim)
        # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
        # past_key_value[0] is (bs, n_heads, q_len - 1, dim_per_head)
        bs, qlen, dim = input.size()
        if past_key_value is not None:
            assert self.is_decoder is True, "Encoder cannot cache past key value states"
            assert (
                len(past_key_value) == 2
            ), "past_key_value should have 2 past states: keys and values. Got {} past states".format(
                len(past_key_value)
            )
            # Length of the full query sequence seen so far (cache + current step).
            real_qlen = qlen + past_key_value[0].shape[2] if query_length is None else query_length
        else:
            real_qlen = qlen
        if kv is None:
            klen = real_qlen
        else:
            klen = kv.size(1)

        def shape(x):
            """ projection """
            return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)

        def unshape(x):
            """ compute context """
            return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)

        q = shape(self.q(input))  # (bs, n_heads, qlen, dim_per_head)
        if kv is None:
            k = shape(self.k(input))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v(input))  # (bs, n_heads, qlen, dim_per_head)
        elif past_key_value is None:
            k = v = kv
            k = shape(self.k(k))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v(v))  # (bs, n_heads, qlen, dim_per_head)
        if past_key_value is not None:
            if kv is None:
                # Self-attention: append the new key/value to the cache.
                k_, v_ = past_key_value
                k = torch.cat([k_, k], dim=2)  # (bs, n_heads, klen, dim_per_head)
                v = torch.cat([v_, v], dim=2)  # (bs, n_heads, klen, dim_per_head)
            else:
                # Cross-attention: encoder key/values never change, reuse cache as-is.
                k, v = past_key_value
        if self.is_decoder and use_cache is True:
            present_key_value_state = ((k, v),)
        else:
            present_key_value_state = (None,)
        # (bs, n_heads, qlen, klen)
        scores = torch.matmul(
            q, k.transpose(3, 2)
        )  # equivalent of torch.einsum("bnqd,bnkd->bnqk", q, k), compatible with onnx op>9
        if position_bias is None:
            if not self.has_relative_attention_bias:
                raise ValueError("No position_bias provided and no weights to compute position_bias")
            position_bias = self.compute_bias(real_qlen, klen)
            # if key and values are already calculated
            # we want only the last query position bias
            if past_key_value is not None:
                position_bias = position_bias[:, :, -qlen:, :]
            if mask is not None:
                position_bias = position_bias + mask  # (bs, n_heads, qlen, klen)
        scores += position_bias
        # Softmax in float32 for stability, then cast back to the score dtype.
        weights = F.softmax(scores.float(), dim=-1).type_as(scores)  # (bs, n_heads, qlen, klen)
        weights = F.dropout(weights, p=self.dropout, training=self.training)  # (bs, n_heads, qlen, klen)
        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask
        context = torch.matmul(weights, v)  # (bs, n_heads, qlen, dim_per_head)
        context = unshape(context)  # (bs, qlen, dim)
        context = self.o(context)
        outputs = (context,) + present_key_value_state
        if output_attentions:
            outputs = outputs + (weights,)
        if self.has_relative_attention_bias:
            outputs = outputs + (position_bias,)
        return outputs
class T5LayerSelfAttention(nn.Module):
    """Pre-norm self-attention sub-layer: LayerNorm -> T5Attention -> dropout -> residual add."""

    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.SelfAttention = T5Attention(
            config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=not config.is_decoder
        )
        self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
    ):
        # T5 normalizes *before* the attention block (pre-LN), then adds the
        # residual around the dropped-out attention output.
        normed_states = self.layer_norm(hidden_states)
        attn_outputs = self.SelfAttention(
            normed_states,
            mask=attention_mask,
            position_bias=position_bias,
            head_mask=head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        residual_output = hidden_states + self.dropout(attn_outputs[0])
        # Pass through any cached key/values, attention weights and position bias.
        return (residual_output,) + attn_outputs[1:]
class T5LayerCrossAttention(nn.Module):
    """Pre-norm cross-attention sub-layer: LayerNorm -> bidirectional T5Attention over `kv` -> dropout -> residual add."""

    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.EncDecAttention = T5Attention(
            config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=True
        )
        self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        kv,
        attention_mask=None,
        position_bias=None,
        head_mask=None,
        past_key_value=None,
        use_cache=False,
        query_length=None,
        output_attentions=False,
    ):
        # Normalize first (pre-LN), attend over the key/value source `kv`
        # (the encoder hidden states), then add the residual.
        normed_states = self.layer_norm(hidden_states)
        attn_outputs = self.EncDecAttention(
            normed_states,
            mask=attention_mask,
            kv=kv,
            position_bias=position_bias,
            head_mask=head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            query_length=query_length,
            output_attentions=output_attentions,
        )
        residual_output = hidden_states + self.dropout(attn_outputs[0])
        # Pass through any cached key/values, attention weights and position bias.
        return (residual_output,) + attn_outputs[1:]
class T5Block(nn.Module):
    """One full T5 transformer layer.

    Sub-modules live in ``self.layer``: index 0 is self-attention, index 1 is
    cross-attention (decoder only), and the last entry is the feed-forward
    block.  Only the block constructed with ``has_relative_attention_bias=True``
    (the first one in a stack) owns relative position bias weights.
    """

    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.layer = nn.ModuleList()
        self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
        if self.is_decoder:
            self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias))
        self.layer.append(T5LayerFF(config))

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
        head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Run self-attention, optional cross-attention, then feed-forward.

        ``past_key_value`` (decoder only) holds 2 cached tensors for
        self-attention, or 4 when cross-attention states are cached as well;
        it is split into the two pairs below.
        """
        if past_key_value is not None:
            assert self.is_decoder, "Only decoder can use `past_key_values`"
            expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
            error_message = "There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states".format(
                expected_num_past_key_values,
                "2 (past / key) for cross attention" if expected_num_past_key_values == 4 else "",
                len(past_key_value),
            )
            assert len(past_key_value) == expected_num_past_key_values, error_message
            # First pair feeds self-attention, the (optional) second pair feeds cross-attention.
            self_attn_past_key_value = past_key_value[:2]
            cross_attn_past_key_value = past_key_value[2:]
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None
        self_attention_outputs = self.layer[0](
            hidden_states,
            attention_mask=attention_mask,
            position_bias=position_bias,
            head_mask=head_mask,
            past_key_value=self_attn_past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states, present_key_value_state = self_attention_outputs[:2]
        attention_outputs = self_attention_outputs[2:]  # Keep self-attention outputs and relative position weights
        if self.is_decoder and encoder_hidden_states is not None:
            # the actual query length is unknown for cross attention
            # if using past key value states. Need to inject it here
            if present_key_value_state is not None:
                query_length = present_key_value_state[0].shape[2]
            else:
                query_length = None
            cross_attention_outputs = self.layer[1](
                hidden_states,
                kv=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                position_bias=encoder_decoder_position_bias,
                head_mask=head_mask,
                past_key_value=cross_attn_past_key_value,
                query_length=query_length,
                use_cache=use_cache,
                output_attentions=output_attentions,
            )
            hidden_states = cross_attention_outputs[0]
            # Combine self attn and cross attn key value states into one 4-tuple
            if present_key_value_state is not None:
                present_key_value_state = present_key_value_state + cross_attention_outputs[1]
            # Keep cross-attention outputs and relative position weights
            attention_outputs = attention_outputs + cross_attention_outputs[2:]
        # Apply Feed Forward layer
        hidden_states = self.layer[-1](hidden_states)
        outputs = (hidden_states,)
        # Add attentions if we output them
        outputs = outputs + (present_key_value_state,) + attention_outputs
        return outputs  # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
class T5PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = T5Config
    load_tf_weights = load_tf_weights_in_t5
    base_model_prefix = "transformer"

    @property
    def dummy_inputs(self):
        """Minimal encoder+decoder inputs (e.g. for tracing/graph export)."""
        input_ids = torch.tensor(DUMMY_INPUTS)
        input_mask = torch.tensor(DUMMY_MASK)
        dummy_inputs = {
            "decoder_input_ids": input_ids,
            "input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        return dummy_inputs

    def _init_weights(self, module):
        """ Initialize the weights following the Mesh TensorFlow T5 scheme. """
        factor = self.config.initializer_factor  # Used for testing weights initialization
        if isinstance(module, T5LayerNorm):
            # T5's RMS-style layer norm has a scale weight only (no bias).
            module.weight.data.fill_(factor * 1.0)
        elif isinstance(module, (T5Model, T5ForConditionalGeneration)):
            # Mesh TensorFlow embeddings initialization
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
            module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
        elif isinstance(module, T5DenseReluDense):
            # Mesh TensorFlow FF initialization
            # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
            # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
            module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi, "bias") and module.wi.bias is not None:
                module.wi.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, T5Attention):
            # Mesh TensorFlow attention initialization to avoid scaling before softmax
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
            d_model = self.config.d_model
            d_kv = self.config.d_kv
            n_heads = self.config.num_heads
            module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5))
            module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
            module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
            module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5))
            if module.has_relative_attention_bias:
                module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))

    def _shift_right(self, input_ids):
        """Shift ``input_ids`` one position to the right, prepending
        ``decoder_start_token_id``; used to turn ``labels`` into
        ``decoder_input_ids`` for teacher forcing.  Any ``-100`` (the loss
        ignore index) is replaced by ``pad_token_id``.
        """
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id
        assert (
            decoder_start_token_id is not None
        ), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information"
        # shift inputs to the right
        shifted_input_ids = input_ids.new_zeros(input_ids.shape)
        shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
        shifted_input_ids[..., 0] = decoder_start_token_id
        assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
        assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
        return shifted_input_ids
class T5Stack(T5PreTrainedModel):
    """A stack of ``T5Block`` layers, usable as either the encoder or the decoder.

    Only the first block owns relative-attention-bias weights; the position
    bias it computes is shared with every subsequent layer (see ``forward``).
    """

    def __init__(self, config, embed_tokens=None):
        # embed_tokens: optional nn.Embedding; T5Model/T5ForConditionalGeneration
        # pass the same shared instance to both encoder and decoder stacks.
        super().__init__(config)
        self.embed_tokens = embed_tokens
        self.is_decoder = config.is_decoder
        # Only block 0 gets has_relative_attention_bias=True; its bias is
        # reused for all later blocks in forward().
        self.block = nn.ModuleList(
            [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
        )
        self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.init_weights()

    def get_input_embeddings(self):
        return self.embed_tokens

    def get_output_embeddings(self):
        # NOTE(review): returns the (shared) input embedding table; the stack
        # itself has no separate output projection.
        return self.embed_tokens

    def set_input_embeddings(self, new_embeddings):
        self.embed_tokens = new_embeddings

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        inputs_embeds=None,
        head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the stack; returns either a tuple or ``BaseModelOutputWithPast``.

        Exactly one of ``input_ids`` / ``inputs_embeds`` must be provided.
        ``encoder_hidden_states`` / ``encoder_attention_mask`` are only used
        when this stack is the decoder (cross-attention).
        """
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(
                f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds")
        if inputs_embeds is None:
            assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
            inputs_embeds = self.embed_tokens(input_ids)
        batch_size, seq_length = input_shape
        # required mask seq length can be calculated via length of past
        mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
        if use_cache is True:
            assert self.is_decoder, ":obj:`use_cache` can only be set to `True` if {} is used as a decoder".format(
                self
            )
        if attention_mask is None:
            attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
        if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
            encoder_seq_length = encoder_hidden_states.shape[1]
            encoder_attention_mask = torch.ones(
                batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
            )
        # initialize past_key_values with `None` if past does not exist
        if past_key_values is None:
            past_key_values = [None] * len(self.block)
        # Turn the 2D padding mask into an additive mask broadcastable to all
        # heads (and, for the decoder, combine it with the causal mask).
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
        if self.is_decoder and encoder_attention_mask is not None:
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_layers)
        present_key_value_states = () if use_cache else None
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        position_bias = None
        encoder_decoder_position_bias = None
        hidden_states = self.dropout(inputs_embeds)
        for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states,
                attention_mask=extended_attention_mask,
                position_bias=position_bias,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
                encoder_decoder_position_bias=encoder_decoder_position_bias,
                head_mask=head_mask[i],
                past_key_value=past_key_value,
                use_cache=use_cache,
                output_attentions=output_attentions,
            )
            # layer_outputs is a tuple with:
            # hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
            hidden_states, present_key_value_state = layer_outputs[:2]
            if i == 0:
                # We share the position biases between the layers - the first layer store them
                # layer_outputs = hidden-states, key-value-states (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
                position_bias = layer_outputs[3 if output_attentions else 2]
                if self.is_decoder and encoder_hidden_states is not None:
                    encoder_decoder_position_bias = layer_outputs[5 if output_attentions else 3]
            # append next layer key value states
            if use_cache:
                present_key_value_states = present_key_value_states + (present_key_value_state,)
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[2],)  # We keep only self-attention weights for now
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions]
                if v is not None
            )
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=present_key_value_states,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )
T5_START_DOCSTRING = r"""
The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer
<https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text
denoising generative setting.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using :class:`~transformers.T5Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
detail.
To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training
<./t5.html#training>`__.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for sequence to sequence training. T5 uses the :obj:`pad_token_id` as the starting token for
:obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last
:obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`).
To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at `T5 Training
<./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset,
:obj:`decoder_input_ids` takes the value of :obj:`input_ids`.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`:
`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a
sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare T5 Model transformer outputting raw hidden-states" "without any specific head on top.",
    T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
    """Encoder-decoder T5 transformer without a task-specific head.

    The token embedding matrix (``self.shared``) is shared between the
    encoder and the decoder stack.
    """

    def __init__(self, config: T5Config):
        super().__init__(config)
        self.shared = nn.Embedding(config.vocab_size, config.d_model)
        # The encoder never caches key/values and is not itself an
        # encoder-decoder, so clone the config and flip those flags.
        encoder_config = copy.deepcopy(config)
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = T5Stack(encoder_config, self.shared)
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = T5Stack(decoder_config, self.shared)
        self.init_weights()

    def get_input_embeddings(self):
        """Return the shared token embedding matrix."""
        return self.shared

    def set_input_embeddings(self, new_embeddings):
        """Replace the shared embedding matrix in the model and both stacks."""
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            # Fix: T5Stack stores its transformer layers in `self.block` (not
            # `self.layer`), and a T5Block keeps its self-attention module at
            # `block.layer[0].SelfAttention` (not `block.attention`).  The
            # previous attribute path raised AttributeError on every call.
            self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        head_mask=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        Returns:
        Example::
            >>> from transformers import T5Tokenizer, T5Model
            >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
            >>> model = T5Model.from_pretrained('t5-small')
            >>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids  # Batch size 1
            >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids  # Batch size 1
            >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True)
            >>> last_hidden_states = outputs.last_hidden_state
        """
        # Accept (and warn about) the deprecated names for `past_key_values`.
        if "decoder_past_key_value_states" in kwargs:
            warnings.warn(
                "The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            past_key_values = kwargs.pop("decoder_past_key_value_states")
        if "decoder_past_key_values" in kwargs:
            warnings.warn(
                "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            past_key_values = kwargs.pop("decoder_past_key_values")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            # Caller passed a plain tuple but asked for a dict-like output:
            # wrap the tuple so attribute access below works.
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        hidden_states = encoder_outputs[0]
        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
authorized_missing_keys = [r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
head_mask=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,
config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for
labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Examples::
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5ForConditionalGeneration.from_pretrained('t5-small', return_dict=True)
>>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you ", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
"""
if "lm_labels" in kwargs:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("lm_labels")
if "decoder_past_key_value_states" in kwargs:
warnings.warn(
"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_value_states")
if "decoder_past_key_values" in kwargs:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_values")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# If decoding with past key value states, only the last tokens
# should be given as an input
if past_key_values is not None:
assert labels is None, "Decoder should not use cached key value states when training."
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
if decoder_inputs_embeds is not None:
decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim ** -0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"use_cache": use_cache,
}
    def _reorder_cache(self, past, beam_idx):
        """Reorder cached key/value states after a beam-search step.

        ``past`` is a tuple with one entry per decoder block, each entry itself
        a tuple of cached tensors; ``beam_idx`` selects the surviving beams.
        Returns a new nested tuple with every tensor re-gathered.
        """
        # If the decoder past is not included in the output, speedy decoding is
        # disabled and there is nothing to reorder.
        if past is None:
            logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
            return past
        reordered_decoder_past = ()
        for layer_past_states in past:
            # Gather the surviving beams along dim 0 of every cached state.
            # NOTE(review): the original comment claimed the batch dim is "at
            # 2nd position", but `index_select(0, ...)` selects along dim 0 —
            # confirm the cached tensors are batch-first.
            reordered_layer_past_states = ()
            for layer_past_state in layer_past_states:
                # Reorder each of the (up to four) key / value states in turn.
                reordered_layer_past_states = reordered_layer_past_states + (
                    layer_past_state.index_select(0, beam_idx),
                )
            assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
            assert len(reordered_layer_past_states) == len(layer_past_states)
            reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
        return reordered_decoder_past
| 44.205975 | 213 | 0.650542 |
import copy
import math
import os
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from .configuration_t5 import T5Config
from .file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput
from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from .utils import logging
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
use_cache=False,
output_attentions=False,
):
bs, qlen, dim = input.size()
if past_key_value is not None:
assert self.is_decoder is True, "Encoder cannot cache past key value states"
assert (
len(past_key_value) == 2
), "past_key_value should have 2 past states: keys and values. Got {} past states".format(
len(past_key_value)
)
real_qlen = qlen + past_key_value[0].shape[2] if query_length is None else query_length
else:
real_qlen = qlen
if kv is None:
klen = real_qlen
else:
klen = kv.size(1)
def shape(x):
return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)
def unshape(x):
return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)
q = shape(self.q(input))
if kv is None:
k = shape(self.k(input))
v = shape(self.v(input))
elif past_key_value is None:
k = v = kv
k = shape(self.k(k))
v = shape(self.v(v))
if past_key_value is not None:
if kv is None:
k_, v_ = past_key_value
k = torch.cat([k_, k], dim=2)
v = torch.cat([v_, v], dim=2)
else:
k, v = past_key_value
if self.is_decoder and use_cache is True:
present_key_value_state = ((k, v),)
else:
present_key_value_state = (None,)
scores = torch.matmul(
q, k.transpose(3, 2)
)
if position_bias is None:
if not self.has_relative_attention_bias:
raise ValueError("No position_bias provided and no weights to compute position_bias")
position_bias = self.compute_bias(real_qlen, klen)
if past_key_value is not None:
position_bias = position_bias[:, :, -qlen:, :]
if mask is not None:
position_bias = position_bias + mask
scores += position_bias
weights = F.softmax(scores.float(), dim=-1).type_as(scores)
weights = F.dropout(weights, p=self.dropout, training=self.training)
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v)
context = unshape(context)
context = self.o(context)
outputs = (context,) + present_key_value_state
if output_attentions:
outputs = outputs + (weights,)
if self.has_relative_attention_bias:
outputs = outputs + (position_bias,)
return outputs
class T5LayerSelfAttention(nn.Module):
    """Pre-norm self-attention sub-layer: LayerNorm -> T5Attention -> dropout -> residual add."""
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        # Bidirectional attention in the encoder, causal in the decoder.
        self.SelfAttention = T5Attention(
            config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=not config.is_decoder
        )
        self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Apply self-attention and return ``(layer_output,) + extras``.

        ``extras`` is whatever ``T5Attention`` appends after its context output
        (present key/value state, then optionally attention weights and the
        position bias), forwarded unchanged so callers can index by position.
        """
        # T5 normalizes *before* attention (pre-norm) and adds the raw input back.
        norm_x = self.layer_norm(hidden_states)
        attention_output = self.SelfAttention(
            norm_x,
            mask=attention_mask,
            position_bias=position_bias,
            head_mask=head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        y = attention_output[0]
        layer_output = hidden_states + self.dropout(y)
        outputs = (layer_output,) + attention_output[1:]  # keep cache/attention extras in place
        return outputs
class T5LayerCrossAttention(nn.Module):
    """Pre-norm encoder-decoder (cross) attention sub-layer.

    Queries come from the decoder hidden states; keys/values come from the
    encoder output passed in as ``kv``. Attention is always bidirectional here.
    """
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.EncDecAttention = T5Attention(
            config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=True
        )
        self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
    def forward(
        self,
        hidden_states,
        kv,
        attention_mask=None,
        position_bias=None,
        head_mask=None,
        past_key_value=None,
        use_cache=False,
        query_length=None,
        output_attentions=False,
    ):
        """Attend from ``hidden_states`` over ``kv`` (the encoder output).

        ``query_length`` gives the true query length when only the last token
        is fed during cached decoding. Returns ``(layer_output,) + extras``
        exactly like the self-attention sub-layer.
        """
        norm_x = self.layer_norm(hidden_states)
        attention_output = self.EncDecAttention(
            norm_x,
            mask=attention_mask,
            kv=kv,
            position_bias=position_bias,
            head_mask=head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            query_length=query_length,
            output_attentions=output_attentions,
        )
        y = attention_output[0]
        layer_output = hidden_states + self.dropout(y)  # residual add around the attended output
        outputs = (layer_output,) + attention_output[1:]
        return outputs
class T5Block(nn.Module):
    """One transformer block: self-attention, optional cross-attention, feed-forward.

    ``self.layer`` holds the sub-layers in order; in the decoder it is
    [self-attn, cross-attn, FF], in the encoder [self-attn, FF]. The ``forward``
    return tuple is position-sensitive — downstream code indexes into it.
    """
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.layer = nn.ModuleList()
        self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
        if self.is_decoder:
            self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias))
        self.layer.append(T5LayerFF(config))
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
        head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Run the block; returns ``(hidden_states, present_key_value, *attn_extras)``."""
        if past_key_value is not None:
            assert self.is_decoder, "Only decoder can use `past_key_values`"
            # 2 cached tensors for self-attention, plus 2 for cross-attention
            # when an encoder output is present.
            expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
            error_message = "There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states".format(
                expected_num_past_key_values,
                "2 (past / key) for cross attention" if expected_num_past_key_values == 4 else "",
                len(past_key_value),
            )
            assert len(past_key_value) == expected_num_past_key_values, error_message
            self_attn_past_key_value = past_key_value[:2]
            cross_attn_past_key_value = past_key_value[2:]
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None
        self_attention_outputs = self.layer[0](
            hidden_states,
            attention_mask=attention_mask,
            position_bias=position_bias,
            head_mask=head_mask,
            past_key_value=self_attn_past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states, present_key_value_state = self_attention_outputs[:2]
        attention_outputs = self_attention_outputs[2:]  # position bias and (optionally) weights
        if self.is_decoder and encoder_hidden_states is not None:
            # The query length must cover the full cached sequence, not just
            # the single token fed during incremental decoding.
            if present_key_value_state is not None:
                query_length = present_key_value_state[0].shape[2]
            else:
                query_length = None
            cross_attention_outputs = self.layer[1](
                hidden_states,
                kv=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                position_bias=encoder_decoder_position_bias,
                head_mask=head_mask,
                past_key_value=cross_attn_past_key_value,
                query_length=query_length,
                use_cache=use_cache,
                output_attentions=output_attentions,
            )
            hidden_states = cross_attention_outputs[0]
            # Append cross-attention key/value state to the self-attention one.
            if present_key_value_state is not None:
                present_key_value_state = present_key_value_state + cross_attention_outputs[1]
            attention_outputs = attention_outputs + cross_attention_outputs[2:]
        hidden_states = self.layer[-1](hidden_states)  # feed-forward sub-layer
        outputs = (hidden_states,)
        outputs = outputs + (present_key_value_state,) + attention_outputs
        return outputs
class T5PreTrainedModel(PreTrainedModel):
    """Base class handling weight initialization and pretrained-model loading for T5."""
    config_class = T5Config
    load_tf_weights = load_tf_weights_in_t5
    base_model_prefix = "transformer"
    @property
    def dummy_inputs(self):
        """Small fixed inputs used for tracing/sanity checks."""
        input_ids = torch.tensor(DUMMY_INPUTS)
        input_mask = torch.tensor(DUMMY_MASK)
        dummy_inputs = {
            "decoder_input_ids": input_ids,
            "input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        return dummy_inputs
    def _init_weights(self, module):
        """Initialize weights following the Mesh-TF T5 scheme, scaled by ``initializer_factor``."""
        factor = self.config.initializer_factor
        if isinstance(module, T5LayerNorm):
            module.weight.data.fill_(factor * 1.0)
        elif isinstance(module, (T5Model, T5ForConditionalGeneration)):
            # Shared token embedding table.
            module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
        elif isinstance(module, T5DenseReluDense):
            # Feed-forward projections scale with the fan-in of each matrix.
            module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi, "bias") and module.wi.bias is not None:
                module.wi.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, T5Attention):
            d_model = self.config.d_model
            d_kv = self.config.d_kv
            n_heads = self.config.num_heads
            module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5))
            module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
            module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
            module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5))
            if module.has_relative_attention_bias:
                module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
    def _shift_right(self, input_ids):
        """Shift ``input_ids`` one position right, prepending ``decoder_start_token_id``.

        Used to build decoder inputs from labels; any -100 sentinel left over
        from loss masking is replaced by the pad token so the embedding lookup
        never sees a negative index.
        """
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id
        assert (
            decoder_start_token_id is not None
        ), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information"
        shifted_input_ids = input_ids.new_zeros(input_ids.shape)
        shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
        shifted_input_ids[..., 0] = decoder_start_token_id
        assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
        assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
        return shifted_input_ids
class T5Stack(T5PreTrainedModel):
    """A stack of ``T5Block``s acting as either the encoder or the decoder.

    The role is selected by ``config.is_decoder``. Only the first block owns
    the relative attention bias; later blocks reuse it via the
    ``position_bias`` tensors threaded through ``forward``.
    """
    def __init__(self, config, embed_tokens=None):
        super().__init__(config)
        # Token embedding table — may be shared with the sibling stack.
        self.embed_tokens = embed_tokens
        self.is_decoder = config.is_decoder
        self.block = nn.ModuleList(
            [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
        )
        self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.init_weights()
    def get_input_embeddings(self):
        return self.embed_tokens
    def get_output_embeddings(self):
        # NOTE(review): returns the *input* embedding table — presumably due to
        # T5's weight tying; confirm callers expect this.
        return self.embed_tokens
    def set_input_embeddings(self, new_embeddings):
        self.embed_tokens = new_embeddings
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        inputs_embeds=None,
        head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the stack; returns a tuple or ``BaseModelOutputWithPast`` per ``return_dict``."""
        # Resolve per-call flags against the config defaults.
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Exactly one of input_ids / inputs_embeds must be given.
        if input_ids is not None and inputs_embeds is not None:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(
                f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds")
        if inputs_embeds is None:
            assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
            inputs_embeds = self.embed_tokens(input_ids)
        batch_size, seq_length = input_shape
        # With a cache, the attention mask must span the cached prefix too.
        mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
        if use_cache is True:
            assert self.is_decoder, ":obj:`use_cache` can only be set to `True` if {} is used as a decoder".format(
                self
            )
        if attention_mask is None:
            attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
        if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
            encoder_seq_length = encoder_hidden_states.shape[1]
            encoder_attention_mask = torch.ones(
                batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
            )
        # One (possibly None) cache entry per block.
        if past_key_values is None:
            past_key_values = [None] * len(self.block)
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
        if self.is_decoder and encoder_attention_mask is not None:
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.num_layers)
        present_key_value_states = () if use_cache else None
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        position_bias = None
        encoder_decoder_position_bias = None
        hidden_states = self.dropout(inputs_embeds)
        for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states,
                attention_mask=extended_attention_mask,
                position_bias=position_bias,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
                encoder_decoder_position_bias=encoder_decoder_position_bias,
                head_mask=head_mask[i],
                past_key_value=past_key_value,
                use_cache=use_cache,
                output_attentions=output_attentions,
            )
            hidden_states, present_key_value_state = layer_outputs[:2]
            if i == 0:
                # Only the first block computes position bias; the offset in
                # the output tuple depends on whether weights were requested.
                position_bias = layer_outputs[3 if output_attentions else 2]
                if self.is_decoder and encoder_hidden_states is not None:
                    encoder_decoder_position_bias = layer_outputs[5 if output_attentions else 3]
            if use_cache:
                present_key_value_states = present_key_value_states + (present_key_value_state,)
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[2],)
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions]
                if v is not None
            )
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=present_key_value_states,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )
T5_START_DOCSTRING = r"""
The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer
<https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text
denoising generative setting.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using :class:`~transformers.T5Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
detail.
To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training
<./t5.html#training>`__.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for sequence to sequence training. T5 uses the :obj:`pad_token_id` as the starting token for
:obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last
:obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`).
To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at `T5 Training
<./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset,
:obj:`decoder_input_ids` takes the value of :obj:`input_ids`.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`:
`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a
sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    # Fix: the two implicitly-concatenated literals were missing a separating
    # space and rendered as "hidden-stateswithout".
    "The bare T5 Model transformer outputting raw hidden-states " "without any specific head on top.",
    T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
    """Encoder-decoder T5 transformer without a task-specific head.

    The encoder and decoder are two ``T5Stack``s sharing one token embedding
    table (``self.shared``); the decoder stack additionally runs causal and
    cross attention.
    """

    def __init__(self, config: T5Config):
        super().__init__(config)
        self.shared = nn.Embedding(config.vocab_size, config.d_model)
        # Encoder: bidirectional, never caches.
        encoder_config = copy.deepcopy(config)
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = T5Stack(encoder_config, self.shared)
        # Decoder: causal, may have a different number of layers.
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = T5Stack(decoder_config, self.shared)
        self.init_weights()

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, new_embeddings):
        # Keep both stacks pointing at the same (new) table.
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def _prune_heads(self, heads_to_prune):
        """Prune attention heads.

        ``heads_to_prune``: dict of {layer_index: list of head indices to prune}.
        """
        for layer, heads in heads_to_prune.items():
            # Fix: T5Stack stores its transformer blocks in `self.block`, and
            # the self-attention module lives at `layer[0].SelfAttention`. The
            # previous code indexed a non-existent `self.encoder.layer[...]
            # .attention` (copied from BERT) and raised AttributeError; this
            # matches the upstream transformers fix.
            self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        head_mask=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        Returns:
        """
        # Accept (with a warning) the deprecated spellings of `past_key_values`.
        if "decoder_past_key_value_states" in kwargs:
            warnings.warn(
                "The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            past_key_values = kwargs.pop("decoder_past_key_value_states")
        if "decoder_past_key_values" in kwargs:
            warnings.warn(
                "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            past_key_values = kwargs.pop("decoder_past_key_values")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Encode once (or reuse / normalize an encoder result passed by the caller).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        hidden_states = encoder_outputs[0]
        # Decode, cross-attending over the encoder output.
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
    """T5 encoder-decoder with a tied/untied LM head for text-to-text generation."""
    # Checkpoint keys that may legitimately be absent when loading (tied weights).
    authorized_missing_keys = [r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", r"lm_head\.weight"]
    def __init__(self, config):
        super().__init__(config)
        self.model_dim = config.d_model
        self.shared = nn.Embedding(config.vocab_size, config.d_model)
        # Encoder: bidirectional, never caches.
        encoder_config = copy.deepcopy(config)
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = T5Stack(encoder_config, self.shared)
        # Decoder: causal, may have a different number of layers.
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = T5Stack(decoder_config, self.shared)
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
        self.init_weights()
    def get_input_embeddings(self):
        return self.shared
    def set_input_embeddings(self, new_embeddings):
        # Keep both stacks pointing at the same (new) table.
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)
    def get_output_embeddings(self):
        return self.lm_head
    def get_encoder(self):
        return self.encoder
    def get_decoder(self):
        return self.decoder
    @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        head_mask=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the cross-entropy LM loss. Indices set to
            :obj:`-100` are ignored (masked).

        Returns:
        """
        # Accept (with a warning) the deprecated argument spellings.
        if "lm_labels" in kwargs:
            warnings.warn(
                "The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("lm_labels")
        if "decoder_past_key_value_states" in kwargs:
            warnings.warn(
                "The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            past_key_values = kwargs.pop("decoder_past_key_value_states")
        if "decoder_past_key_values" in kwargs:
            warnings.warn(
                "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            past_key_values = kwargs.pop("decoder_past_key_values")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Encode once (or reuse / normalize a caller-provided encoder result).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        hidden_states = encoder_outputs[0]
        # Teacher forcing: derive decoder inputs from the labels when needed.
        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = self._shift_right(labels)
        # During cached decoding, only the last decoder token is fed.
        if past_key_values is not None:
            assert labels is None, "Decoder should not use cached key value states when training."
            if decoder_input_ids is not None:
                decoder_input_ids = decoder_input_ids[:, -1:]
            if decoder_inputs_embeds is not None:
                decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = decoder_outputs[0]
        # Rescale before projecting onto the vocabulary (Mesh-TF convention,
        # compensates for tied embeddings being used as the output projection).
        sequence_output = sequence_output * (self.model_dim ** -0.5)
        lm_logits = self.lm_head(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
        if not return_dict:
            output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
            return ((loss,) + output) if loss is not None else output
        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs):
        """Build the kwargs for one generation step; with a cache only the last token is fed."""
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {
            "decoder_input_ids": input_ids,
            "past_key_values": past,
            "encoder_outputs": encoder_outputs,
            "attention_mask": attention_mask,
            "use_cache": use_cache,
        }
    def _reorder_cache(self, past, beam_idx):
        """Reorder cached key/value states to follow beam-search reordering."""
        # If no cache was returned, there is nothing to reorder.
        if past is None:
            logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
            return past
        reordered_decoder_past = ()
        for layer_past_states in past:
            # Gather the surviving beams along dim 0 of every cached state.
            reordered_layer_past_states = ()
            for layer_past_state in layer_past_states:
                reordered_layer_past_states = reordered_layer_past_states + (
                    layer_past_state.index_select(0, beam_idx),
                )
            assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
            assert len(reordered_layer_past_states) == len(layer_past_states)
            reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
        return reordered_decoder_past
| true | true |
f71acf41bdacbcba980d2fbc41eeab24cc7554c3 | 1,140 | py | Python | pytanga/components/config.py | renatoalmeidaoliveira/Pytanga | aa02f1c0f2573da1330d1d246ab780fa3be336a5 | [
"MIT"
] | null | null | null | pytanga/components/config.py | renatoalmeidaoliveira/Pytanga | aa02f1c0f2573da1330d1d246ab780fa3be336a5 | [
"MIT"
] | null | null | null | pytanga/components/config.py | renatoalmeidaoliveira/Pytanga | aa02f1c0f2573da1330d1d246ab780fa3be336a5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Config component.
This module defines the config Component.
<config>
</config>
"""
from . import AbstractComponent
class configComponent(AbstractComponent):
    """Component rendering a NETCONF ``<config>`` element.

    Children are nested components; ``parse`` serializes the children first
    and then delegates serialization of this node to the given serializer.
    """

    def __init__(self):
        self._xmlns = {}
        self.attributes = {}
        self.parent_xmlns = {}
        # Fix: the original annotation used `typing.List`, which was never
        # imported; use the builtin instead (PEP 585).
        self._children: list = []  # child AbstractComponent instances
        self.childrenData = []
        self.tag = 'config'

    @property
    def xmlns(self):
        """Mapping of XML namespaces declared on this element."""
        return self._xmlns

    @xmlns.setter
    def xmlns(self, xmlns):
        self._xmlns = xmlns

    def add(self, component) -> None:
        """Append a child component."""
        self._children.append(component)

    def remove(self, component) -> None:
        """Remove a previously added child component."""
        self._children.remove(component)

    def is_composite(self) -> bool:
        # NOTE(review): returns False although this node holds children; kept
        # as-is for interface compatibility — confirm against the other
        # pytanga components before changing.
        return False

    def getXMLNS(self):
        """Propagate namespace collection to the children, return own xmlns."""
        # Fix: removed the dead local `childrenData` list that was built but
        # never read.
        for child in self._children:
            child.getXMLNS()
        return self._xmlns

    def parse(self, serializer):
        """Serialize all children, cache their output, then serialize this node."""
        self.childrenData = []
        self.getXMLNS()
        for child in self._children:
            self.childrenData.append(child.parse(serializer))
        return serializer.parse(self)
| 21.923077 | 61 | 0.60614 |
from . import AbstractComponent
class configComponent(AbstractComponent):
    """NETCONF ``<config>`` container component.

    Collects child components and renders them inside a ``<config>`` tag
    through the supplied serializer.
    """

    def __init__(self):
        self._xmlns = {}
        self.attributes = {}
        self.parent_xmlns = {}
        # Fixed: ``List`` was referenced here without being imported; since
        # annotations on attribute targets are evaluated at runtime (PEP 526),
        # every instantiation raised NameError. The builtin ``list`` is used.
        self._children: list = []
        self.childrenData = []
        self.tag = 'config'

    @property
    def xmlns(self):
        """Return the namespace mapping of this element."""
        return self._xmlns

    @xmlns.setter
    def xmlns(self, xmlns):
        self._xmlns = xmlns

    def add(self, component) -> None:
        """Register *component* as a child of this element."""
        self._children.append(component)

    def remove(self, component) -> None:
        """Unregister *component* from this element."""
        self._children.remove(component)

    def is_composite(self) -> bool:
        """Unchanged (``False``) to keep serializer behavior identical."""
        return False

    def getXMLNS(self):
        """Propagate namespace collection to children; return own mapping."""
        # Dropped the unused local accumulator the original created here.
        for child in self._children:
            child.getXMLNS()
        return self._xmlns

    def parse(self, serializer):
        """Serialize this element and its children with *serializer*."""
        self.childrenData = []
        self.getXMLNS()
        for child in self._children:
            self.childrenData.append(child.parse(serializer))
        return serializer.parse(self)
| true | true |
f71acfeb35f54faa88ad90bc14c98d37cd3bbfd8 | 97 | py | Python | InvoiceBook_website/backend/InvoiceBook/apps.py | HumbertMeyers/InvoiceBook | 99af326a529566bdcff5c9c4015f2d89d5df2752 | [
"MIT"
] | null | null | null | InvoiceBook_website/backend/InvoiceBook/apps.py | HumbertMeyers/InvoiceBook | 99af326a529566bdcff5c9c4015f2d89d5df2752 | [
"MIT"
] | null | null | null | InvoiceBook_website/backend/InvoiceBook/apps.py | HumbertMeyers/InvoiceBook | 99af326a529566bdcff5c9c4015f2d89d5df2752 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class InvoicebookConfig(AppConfig):
    """Django AppConfig describing the InvoiceBook application."""
    # Dotted-path label under which Django registers this app.
    name = 'InvoiceBook'
| 16.166667 | 35 | 0.773196 | from django.apps import AppConfig
class InvoicebookConfig(AppConfig):
    """Application configuration for the InvoiceBook Django app."""
    # Name Django uses to look up and register the application.
    name = 'InvoiceBook'
| true | true |
f71ad0247ebf714503f6f8492d0d47f17da35091 | 623 | py | Python | course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py | sepam/machine-learning-engineering-for-production-public | cd6053459eee9b7f30bf86da63104b3f1381383a | [
"Apache-2.0"
] | null | null | null | course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py | sepam/machine-learning-engineering-for-production-public | cd6053459eee9b7f30bf86da63104b3f1381383a | [
"Apache-2.0"
] | null | null | null | course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py | sepam/machine-learning-engineering-for-production-public | cd6053459eee9b7f30bf86da63104b3f1381383a | [
"Apache-2.0"
] | null | null | null | import pickle
import numpy as np
from typing import List
from fastapi import FastAPI
from pydantic import BaseModel, conlist
app = FastAPI(title="Predicting Wine Class with batching")
# Open classifier in global scope so the model is unpickled once at import
# time and shared by every request served by this process.
with open("models/wine-95-fixed.pkl", "rb") as file:
    clf = pickle.load(file)
class Wine(BaseModel):
    """Request payload: a batch of wine samples, 13 float features each."""
    batches: List[conlist(item_type=float, min_items=13, max_items=13)]
# make predictions on this endpoint
@app.post("/predict")
def predict(wine: Wine):
    """Run the classifier on every sample of the batch and return the labels."""
    batches = wine.batches
    np_batches = np.array(batches)
    pred = clf.predict(np_batches).tolist()
    return {"Prediction": pred}
| 23.074074 | 71 | 0.731942 | import pickle
import numpy as np
from typing import List
from fastapi import FastAPI
from pydantic import BaseModel, conlist
app = FastAPI(title="Predicting Wine Class with batching")
# The fitted classifier is unpickled once at import time and reused for
# every incoming request.
with open("models/wine-95-fixed.pkl", "rb") as file:
    clf = pickle.load(file)
class Wine(BaseModel):
    """Request body: a list of samples, each with exactly 13 float features."""
    batches: List[conlist(item_type=float, min_items=13, max_items=13)]
@app.post("/predict")
def predict(wine: Wine):
    """Predict the wine class for every sample in the submitted batch."""
    batches = wine.batches
    np_batches = np.array(batches)
    pred = clf.predict(np_batches).tolist()
    return {"Prediction": pred}
| true | true |
f71ad074c1c6f98c66a9513ea45897d958bd392f | 1,461 | py | Python | python/config_default_48k.py | entn-at/GlottDNN | b7db669d7f34da92ab34742d75a8ba3c70763a65 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/config_default_48k.py | entn-at/GlottDNN | b7db669d7f34da92ab34742d75a8ba3c70763a65 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/config_default_48k.py | entn-at/GlottDNN | b7db669d7f34da92ab34742d75a8ba3c70763a65 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # run flags
# Pipeline stage toggles: each flag enables one step of the GlottDNN
# analysis / training / synthesis workflow driven by this config.
make_dirs = True
make_scp = True
do_sptk_pitch_analysis = False
do_reaper_pitch_analysis = False
do_glott_vocoder_analysis = False
make_dnn_train_data = False
make_dnn_infofile = False
do_dnn_training = False
do_glott_vocoder_synthesis = True
# directories
prjdir = '/l/CODE/GlottDNN' # add your own local install dir here
datadir = prjdir + '/data/ansyn_jenny'
# general parameters
sampling_frequency = 48000
warping_lambda = 0.54
use_external_gci = False
# programs (external binaries invoked by the pipeline)
reaper = 'reaper'
sox = 'sox'
pitch = '/u/76/mairaksi/unix/Documents/SPTK-3.8/bin/pitch/pitch -a 0 -s 48.0 -o 1 -p 240 -T 0.0 -L 50 -H 500 '
x2x = '/u/76/mairaksi/unix/Documents/SPTK-3.8/bin/x2x/x2x'
Analysis = prjdir + '/src/Analysis'
Synthesis = prjdir + '/src/Synthesis'
config_default = prjdir + '/config/config_default_48k.cfg'
# nn input params
inputs = ['f0', 'gain', 'hnr', 'slsf', 'lsf']
input_exts = ['.f0', '.gain', '.hnr', '.slsf','.lsf']
input_dims = [1, 1, 25, 10, 50] # set feature to zero if not used
outputs = ['pls']
output_exts = ['.pls']
output_dims = [1200]
# dnn data conf
dnn_name = 'nancy48_legacy_same'
train_data_dir = prjdir + '/nndata/traindata/' + dnn_name
weights_data_dir = prjdir + '/nndata/weights/' + dnn_name
data_buffer_size = 1000
remove_unvoiced_frames = True
#train_set = [1, 2 , 3, 4, 5]
train_set = [1]
val_set = [6]
test_set = [7]
# dnn train conf
n_hidden = [250, 250, 250]
learning_rate = 0.1
batch_size = 100
max_epochs = 20000
| 26.563636 | 110 | 0.713895 |
# Pipeline stage toggles for the GlottDNN workflow (48 kHz configuration).
make_dirs = True
make_scp = True
do_sptk_pitch_analysis = False
do_reaper_pitch_analysis = False
do_glott_vocoder_analysis = False
make_dnn_train_data = False
make_dnn_infofile = False
do_dnn_training = False
do_glott_vocoder_synthesis = True
# Project and data directories.
prjdir = '/l/CODE/GlottDNN'
datadir = prjdir + '/data/ansyn_jenny'
# General signal parameters.
sampling_frequency = 48000
warping_lambda = 0.54
use_external_gci = False
# External analysis/synthesis programs.
reaper = 'reaper'
sox = 'sox'
pitch = '/u/76/mairaksi/unix/Documents/SPTK-3.8/bin/pitch/pitch -a 0 -s 48.0 -o 1 -p 240 -T 0.0 -L 50 -H 500 '
x2x = '/u/76/mairaksi/unix/Documents/SPTK-3.8/bin/x2x/x2x'
Analysis = prjdir + '/src/Analysis'
Synthesis = prjdir + '/src/Synthesis'
config_default = prjdir + '/config/config_default_48k.cfg'
# Neural-network input/output feature configuration.
inputs = ['f0', 'gain', 'hnr', 'slsf', 'lsf']
input_exts = ['.f0', '.gain', '.hnr', '.slsf','.lsf']
input_dims = [1, 1, 25, 10, 50]
outputs = ['pls']
output_exts = ['.pls']
output_dims = [1200]
# DNN training data configuration.
dnn_name = 'nancy48_legacy_same'
train_data_dir = prjdir + '/nndata/traindata/' + dnn_name
weights_data_dir = prjdir + '/nndata/weights/' + dnn_name
data_buffer_size = 1000
remove_unvoiced_frames = True
train_set = [1]
val_set = [6]
test_set = [7]
# DNN training hyper-parameters.
n_hidden = [250, 250, 250]
learning_rate = 0.1
batch_size = 100
max_epochs = 20000
| true | true |
f71ad0e03a1f64c0b8808cee586a271e9c91b997 | 950 | py | Python | clif/testing/python/non_raising_test.py | wangxf123456/clif | 9bff8a28f5d266d6ea4f4bb0dc1d9c9a0c9ee5b1 | [
"Apache-2.0"
] | 966 | 2017-04-18T04:14:04.000Z | 2022-03-03T21:22:44.000Z | clif/testing/python/non_raising_test.py | wangxf123456/clif | 9bff8a28f5d266d6ea4f4bb0dc1d9c9a0c9ee5b1 | [
"Apache-2.0"
] | 48 | 2017-05-02T23:51:29.000Z | 2021-12-06T19:10:11.000Z | clif/testing/python/non_raising_test.py | wangxf123456/clif | 9bff8a28f5d266d6ea4f4bb0dc1d9c9a0c9ee5b1 | [
"Apache-2.0"
] | 135 | 2017-04-26T06:15:30.000Z | 2022-01-07T02:17:20.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from clif.testing.python import non_raising
class NonRaisingTest(absltest.TestCase):
  """Exercises the CLIF-wrapped non-raising test helpers."""

  def testPlain(self):
    """The plain (unmarked) factory is expected to return 1."""
    result = non_raising.MakeTestNonRaisingPlain()
    self.assertEqual(result, 1)

  def testMarked(self):
    """The factory marked non-raising is expected to return -1."""
    result = non_raising.MakeTestNonRaisingMarked()
    self.assertEqual(result, -1)


if __name__ == '__main__':
  absltest.main()
| 28.787879 | 74 | 0.753684 |
from absl.testing import absltest
from clif.testing.python import non_raising
class NonRaisingTest(absltest.TestCase):
  """Tests for CLIF wrappers of the non-raising test helpers."""
  def testPlain(self):
    # The unmarked wrapper is expected to report 1.
    num = non_raising.MakeTestNonRaisingPlain()
    self.assertEqual(num, 1)
  def testMarked(self):
    # The wrapper marked non-raising is expected to report -1.
    num = non_raising.MakeTestNonRaisingMarked()
    self.assertEqual(num, -1)
if __name__ == '__main__':
  # Run the tests when executed directly.
  absltest.main()
| true | true |
f71ad1078d00053f655a55288594e5cc3c29cabe | 6,120 | py | Python | custom_components/pandora_cas/binary_sensor.py | alryaz/hass-pandora-cas | 684e9d5e95c89fcf5ac9ede75df3e18c05c03452 | [
"MIT"
] | 15 | 2020-09-15T01:05:05.000Z | 2022-03-15T11:18:07.000Z | custom_components/pandora_cas/binary_sensor.py | alryaz/hass-pandora-cas | 684e9d5e95c89fcf5ac9ede75df3e18c05c03452 | [
"MIT"
] | 12 | 2020-09-13T13:22:27.000Z | 2022-03-15T12:03:45.000Z | custom_components/pandora_cas/binary_sensor.py | alryaz/hass-pandora-cas | 684e9d5e95c89fcf5ac9ede75df3e18c05c03452 | [
"MIT"
] | 2 | 2021-06-03T16:32:22.000Z | 2021-10-01T20:53:50.000Z | """Binary sensor platform for Pandora Car Alarm System."""
# Public names re-exported when this platform module is star-imported.
__all__ = ["ENTITY_TYPES", "async_setup_entry"]
import logging
from functools import partial
from typing import Any, Dict
import attr
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_MOTION,
DOMAIN as PLATFORM_DOMAIN,
BinarySensorEntity,
ENTITY_ID_FORMAT,
)
from homeassistant.const import ATTR_NAME, ATTR_ICON, ATTR_DEVICE_CLASS
from . import PandoraCASBooleanEntity, async_platform_setup_entry
from .api import BitStatus
from .const import *
_LOGGER = logging.getLogger(__name__)
# Icon pairs indexed by boolean state: (closed/off icon, open/on icon).
_car_door_icons = ("mdi:car-door-lock", "mdi:car-door")
_car_glass_icons = ("mdi:car-windshield", "mdi:car-windshield-outline")
# Declarative catalogue of every binary sensor this platform can create.
# Each entry maps a sensor id to its display name, device class, the device
# attribute backing its state and, for bit_state sensors, the BitStatus flag.
ENTITY_TYPES = {
    "connection_state": {
        ATTR_NAME: "Connection state",
        ATTR_DEVICE_CLASS: DEVICE_CLASS_CONNECTIVITY,
        ATTR_ATTRIBUTE: "is_online",
        ATTR_ATTRIBUTE_SOURCE: True,
    },
    "moving": {
        ATTR_NAME: "Moving",
        ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION,
        ATTR_STATE_SENSITIVE: True,
        ATTR_ATTRIBUTE: "is_moving",
    },
    # Status-related sensors
    "left_front_door": {
        ATTR_NAME: "Left Front Door",
        ATTR_ICON: _car_door_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.DOOR_FRONT_LEFT_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "right_front_door": {
        ATTR_NAME: "Right Front Door",
        ATTR_ICON: _car_door_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.DOOR_FRONT_RIGHT_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "left_back_door": {
        ATTR_NAME: "Left Back Door",
        ATTR_ICON: _car_door_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.DOOR_BACK_LEFT_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "right_back_door": {
        ATTR_NAME: "Right Back Door",
        ATTR_ICON: _car_door_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.DOOR_BACK_RIGHT_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "left_front_glass": {
        ATTR_NAME: "Left Front Glass",
        ATTR_ICON: _car_glass_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "can_glass_front_left",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
    "right_front_glass": {
        ATTR_NAME: "Right Front Glass",
        ATTR_ICON: _car_glass_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "can_glass_front_right",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
    "left_back_glass": {
        ATTR_NAME: "Left Back Glass",
        ATTR_ICON: _car_glass_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "can_glass_back_left",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
    "right_back_glass": {
        ATTR_NAME: "Right Back Glass",
        ATTR_ICON: _car_glass_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "can_glass_back_right",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
    "trunk": {
        ATTR_NAME: "Trunk",
        ATTR_ICON: "mdi:car-back",
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.TRUNK_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "hood": {
        ATTR_NAME: "Hood",
        ATTR_ICON: "mdi:car",
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.HOOD_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "parking": {
        ATTR_NAME: "Parking Mode",
        ATTR_ICON: "mdi:car-brake-parking",
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.HANDBRAKE_ENGAGED,
        ATTR_STATE_SENSITIVE: True,
    },
    "brakes": {
        ATTR_NAME: "Brakes",
        ATTR_ICON: "mdi:car-brake-hold",
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.BRAKES_ENGAGED,
        ATTR_STATE_SENSITIVE: True,
    },
    "ignition": {
        ATTR_NAME: "Ignition",
        ATTR_ICON: "mdi:key-variant",
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.IGNITION,
    },
    "exterior_lights": {
        ATTR_NAME: "Exterior Lights",
        ATTR_ICON: "mdi:car-light-high",
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.EXTERIOR_LIGHTS_ACTIVE,
    },
    "ev_charging_connected": {
        ATTR_NAME: "EV Charging Connected",
        ATTR_ICON: "mdi:ev-station",
        ATTR_ATTRIBUTE: "ev_charging_connected",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
}
class PandoraCASBinarySensor(PandoraCASBooleanEntity, BinarySensorEntity):
    """Boolean Pandora CAS device state exposed as a binary sensor."""

    ENTITY_TYPES = ENTITY_TYPES
    ENTITY_ID_FORMAT = ENTITY_ID_FORMAT

    @property
    def is_on(self) -> bool:
        """Return the truthiness of the entity's current internal state."""
        return bool(self._state)

    @property
    def device_state_attributes(self) -> Dict[str, Any]:
        """Extend the base attributes with per-sensor-type details."""
        attributes = super().device_state_attributes
        sensor_type = self._entity_type
        if sensor_type == "connection_state":
            # Merge a full snapshot of the device state fields in.
            device_state = self._device.state
            if device_state is not None:
                attributes.update(attr.asdict(device_state, True))
        elif sensor_type == "ev_charging_connected" and self._device.is_online:
            device_state = self._device.state
            attributes["slow_charging"] = device_state.ev_charging_slow
            attributes["fast_charging"] = device_state.ev_charging_fast
            attributes["ready_status"] = device_state.ev_status_ready
        return attributes
# Config-entry setup hook: delegates to the shared platform setup helper
# with this platform's domain, entity class and logger pre-bound.
async_setup_entry = partial(
    async_platform_setup_entry, PLATFORM_DOMAIN, PandoraCASBinarySensor, logger=_LOGGER
)
| 31.709845 | 87 | 0.662745 | __all__ = ["ENTITY_TYPES", "async_setup_entry"]
import logging
from functools import partial
from typing import Any, Dict
import attr
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_MOTION,
DOMAIN as PLATFORM_DOMAIN,
BinarySensorEntity,
ENTITY_ID_FORMAT,
)
from homeassistant.const import ATTR_NAME, ATTR_ICON, ATTR_DEVICE_CLASS
from . import PandoraCASBooleanEntity, async_platform_setup_entry
from .api import BitStatus
from .const import *
_LOGGER = logging.getLogger(__name__)
# (closed/off icon, open/on icon) pairs, indexed by the boolean state.
_car_door_icons = ("mdi:car-door-lock", "mdi:car-door")
_car_glass_icons = ("mdi:car-windshield", "mdi:car-windshield-outline")
# Every binary sensor this platform can create, keyed by sensor type id.
ENTITY_TYPES = {
    "connection_state": {
        ATTR_NAME: "Connection state",
        ATTR_DEVICE_CLASS: DEVICE_CLASS_CONNECTIVITY,
        ATTR_ATTRIBUTE: "is_online",
        ATTR_ATTRIBUTE_SOURCE: True,
    },
    "moving": {
        ATTR_NAME: "Moving",
        ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION,
        ATTR_STATE_SENSITIVE: True,
        ATTR_ATTRIBUTE: "is_moving",
    },
    "left_front_door": {
        ATTR_NAME: "Left Front Door",
        ATTR_ICON: _car_door_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.DOOR_FRONT_LEFT_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "right_front_door": {
        ATTR_NAME: "Right Front Door",
        ATTR_ICON: _car_door_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.DOOR_FRONT_RIGHT_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "left_back_door": {
        ATTR_NAME: "Left Back Door",
        ATTR_ICON: _car_door_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.DOOR_BACK_LEFT_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "right_back_door": {
        ATTR_NAME: "Right Back Door",
        ATTR_ICON: _car_door_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.DOOR_BACK_RIGHT_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "left_front_glass": {
        ATTR_NAME: "Left Front Glass",
        ATTR_ICON: _car_glass_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "can_glass_front_left",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
    "right_front_glass": {
        ATTR_NAME: "Right Front Glass",
        ATTR_ICON: _car_glass_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "can_glass_front_right",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
    "left_back_glass": {
        ATTR_NAME: "Left Back Glass",
        ATTR_ICON: _car_glass_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "can_glass_back_left",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
    "right_back_glass": {
        ATTR_NAME: "Right Back Glass",
        ATTR_ICON: _car_glass_icons,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "can_glass_back_right",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
    "trunk": {
        ATTR_NAME: "Trunk",
        ATTR_ICON: "mdi:car-back",
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.TRUNK_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "hood": {
        ATTR_NAME: "Hood",
        ATTR_ICON: "mdi:car",
        ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.HOOD_OPEN,
        ATTR_STATE_SENSITIVE: True,
    },
    "parking": {
        ATTR_NAME: "Parking Mode",
        ATTR_ICON: "mdi:car-brake-parking",
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.HANDBRAKE_ENGAGED,
        ATTR_STATE_SENSITIVE: True,
    },
    "brakes": {
        ATTR_NAME: "Brakes",
        ATTR_ICON: "mdi:car-brake-hold",
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.BRAKES_ENGAGED,
        ATTR_STATE_SENSITIVE: True,
    },
    "ignition": {
        ATTR_NAME: "Ignition",
        ATTR_ICON: "mdi:key-variant",
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.IGNITION,
    },
    "exterior_lights": {
        ATTR_NAME: "Exterior Lights",
        ATTR_ICON: "mdi:car-light-high",
        ATTR_ATTRIBUTE: "bit_state",
        ATTR_FLAG: BitStatus.EXTERIOR_LIGHTS_ACTIVE,
    },
    "ev_charging_connected": {
        ATTR_NAME: "EV Charging Connected",
        ATTR_ICON: "mdi:ev-station",
        ATTR_ATTRIBUTE: "ev_charging_connected",
        ATTR_STATE_SENSITIVE: True,
        ATTR_DISABLED_BY_DEFAULT: True,
    },
}
class PandoraCASBinarySensor(PandoraCASBooleanEntity, BinarySensorEntity):
    """Boolean Pandora CAS device state surfaced as a binary sensor."""
    ENTITY_TYPES = ENTITY_TYPES
    ENTITY_ID_FORMAT = ENTITY_ID_FORMAT
    @property
    def is_on(self) -> bool:
        """Return True when the entity's internal state is truthy."""
        return bool(self._state)
    @property
    def device_state_attributes(self) -> Dict[str, Any]:
        """Augment the base attributes with sensor-type specific details."""
        existing_attributes = super().device_state_attributes
        entity_type = self._entity_type
        if entity_type == "connection_state":
            # Merge a snapshot of all device state fields into the attributes.
            state = self._device.state
            if state is not None:
                existing_attributes.update(attr.asdict(state, True))
        elif entity_type == "ev_charging_connected":
            if not self._device.is_online:
                return existing_attributes
            state = self._device.state
            existing_attributes["slow_charging"] = state.ev_charging_slow
            existing_attributes["fast_charging"] = state.ev_charging_fast
            existing_attributes["ready_status"] = state.ev_status_ready
        return existing_attributes
# Config-entry setup hook with the platform domain, entity class and logger
# pre-bound via functools.partial.
async_setup_entry = partial(
    async_platform_setup_entry, PLATFORM_DOMAIN, PandoraCASBinarySensor, logger=_LOGGER
)
| true | true |
f71ad2d57334e5b52814c0e8d8c40e1550a4d52b | 1,141 | py | Python | assignment_seven_2021_09_14/cycle_sort.py | Soyvolon/CISS_380 | 81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08 | [
"Unlicense"
] | null | null | null | assignment_seven_2021_09_14/cycle_sort.py | Soyvolon/CISS_380 | 81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08 | [
"Unlicense"
] | null | null | null | assignment_seven_2021_09_14/cycle_sort.py | Soyvolon/CISS_380 | 81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08 | [
"Unlicense"
] | null | null | null | def cycle_sort(data: list):
cap = len(data)
for start in range(0, cap - 1):
# get item
item = data[start]
# get new pos for said item
pos = start
for i in range(start + 1, cap):
if data[i] < item:
pos += 1
# if there isnt a new pos, skip this, we don't move it
if start == pos:
continue
# skip past any any duplicates
while data[pos] == item:
pos += 1
# set the item to the pos
# and get the next item to move
data[pos], item = item, data[pos]
# take the new item and move
# it backwards until
# it is in its correct spot
while pos != start:
pos = start
# get the new pos value
for i in range(start + 1, cap):
if data[i] < item:
pos += 1
# skip duplicates
while data[pos] == item:
pos += 1
# and place item at data[pos] and prep
# data[pos] to be moved to the next spot
data[pos], item = item, data[pos]
| 31.694444 | 62 | 0.473269 | def cycle_sort(data: list):
cap = len(data)
for start in range(0, cap - 1):
item = data[start]
pos = start
for i in range(start + 1, cap):
if data[i] < item:
pos += 1
if start == pos:
continue
# skip past any any duplicates
while data[pos] == item:
pos += 1
# set the item to the pos
# and get the next item to move
data[pos], item = item, data[pos]
# take the new item and move
# it backwards until
# it is in its correct spot
while pos != start:
pos = start
# get the new pos value
for i in range(start + 1, cap):
if data[i] < item:
pos += 1
# skip duplicates
while data[pos] == item:
pos += 1
# and place item at data[pos] and prep
# data[pos] to be moved to the next spot
data[pos], item = item, data[pos]
| true | true |
f71ad48e5cbbd6870f5adadbf55d52267e89621a | 5,044 | py | Python | options.py | Kunalmighty/BlueJaysGame | 76936cea8a57542bf7ea3d546f851f9f9771931f | [
"MIT"
] | null | null | null | options.py | Kunalmighty/BlueJaysGame | 76936cea8a57542bf7ea3d546f851f9f9771931f | [
"MIT"
] | null | null | null | options.py | Kunalmighty/BlueJaysGame | 76936cea8a57542bf7ea3d546f851f9f9771931f | [
"MIT"
] | null | null | null | """ #EmbraceTheS's options menu state. """
import state
import menu
import globes
import pygame
import joystick
import volume
class Options(state.State):
    """ Option menu state with the options to clear high scores, and
        adjust brightness/volume (not yet implemented) """
    TEXT = []
    BACKGROUND = None
    LEFT_MARGIN = None
    HEIGHTS = None
    def __init__(self, sound=False, option=0):
        """Build the menu; `sound` skips restarting the title music and
        `option` selects the initially highlighted row."""
        state.State.__init__(self)
        if not sound:
            globes.play_music("title.ogg")
        if (Options.BACKGROUND is None):
            Options.BACKGROUND = pygame.image.load("bg/titlescreen.png")\
                .convert()
        self.option = option
        self.blink = 0  # cycle through 0-9, display if < 7
        self.confirmation = False  # if asking for action confirmation
        self.confirmed = 0  # 0: no, 1: yes
        # Class-level assets are rendered lazily, once, and shared by every
        # Options instance.
        if (len(Options.TEXT) == 0):
            Options.TEXT = [globes.Globals.FONT.render("Clear High Scores",
                            True, globes.BLACK),
                            globes.Globals.FONT.render("Setup Joystick",
                            True, globes.BLACK),
                            globes.Globals.FONT.render("Volume & Brightness",
                            True, globes.BLACK),
                            globes.Globals.FONT.render("Return to Menu",
                            True, globes.BLACK)]
        if Options.LEFT_MARGIN is None:
            Options.LEFT_MARGIN = 2 * globes.Globals.WIDTH / 3
        if Options.HEIGHTS is None:
            Options.HEIGHTS = [
                (globes.Globals.HEIGHT / 2 - globes.Globals.HEIGHT / 8),
                globes.Globals.HEIGHT / 2 - globes.Globals.HEIGHT / 16,
                globes.Globals.HEIGHT / 2,
                globes.Globals.HEIGHT / 2 + globes.Globals.HEIGHT / 16,
                (globes.Globals.HEIGHT / 2 + globes.Globals.HEIGHT / 8)
            ]
    def render(self):
        """Draw the four menu rows, or the clear-scores confirmation prompt;
        the selected item blinks (hidden while blink >= 7)."""
        globes.Globals.SCREEN.blit(Options.BACKGROUND, (0, 0))
        if not self.confirmation:
            for i in range(4):
                if ((not (self.option == i)) or self.blink < 7):
                    globes.Globals.SCREEN.blit(Options.TEXT[i],
                                               (Options.LEFT_MARGIN,
                                                Options.HEIGHTS[i]))
        else:
            surf = globes.Globals.FONT.render("Are you absolutely certain " +
                                              "you want to erase", True,
                                              globes.BLACK)
            globes.Globals.SCREEN.blit(surf, (270, 70))
            surf = globes.Globals.FONT.render("your legendary legacy?", True,
                                              globes.BLACK)
            globes.Globals.SCREEN.blit(surf, (370, 95))
            if self.blink < 7 or not self.confirmed == 1:
                surf = globes.Globals.FONT.render("Yes", True,
                                                  globes.BLACK)
                globes.Globals.SCREEN.blit(surf, (430, 130))
            if self.blink < 7 or not self.confirmed == 0:
                surf = globes.Globals.FONT.render("No", True,
                                                  globes.BLACK)
                globes.Globals.SCREEN.blit(surf, (530, 130))
    def update(self, time):
        """Advance the blink counter (one step per frame, modulo 10)."""
        self.blink = (self.blink + 1) % 10
    def event(self, event):
        """Handle keyboard input for the menu and the confirmation prompt."""
        if self.confirmation:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    self.confirmed = (self.confirmed - 1) % 2
                elif event.key == pygame.K_RIGHT:
                    self.confirmed = (self.confirmed + 1) % 2
                if event.key == pygame.K_SPACE or event.key == pygame.K_RETURN:
                    if self.confirmed:
                        globes.Globals.HIGHSCORES.clear_file()
                        self.confirmation = False
                    else:
                        self.confirmation = False
        else:
            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                globes.Globals.STATE = menu.Menu(True)
            if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
                self.option = (self.option - 1) % 4
            if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
                self.option = (self.option + 1) % 4
            if event.type == pygame.KEYDOWN and \
               (event.key == pygame.K_SPACE or
                event.key == pygame.K_RETURN):
                if self.option == 0:
                    self.confirmation = True
                elif self.option == 1:
                    globes.Globals.STATE = joystick.Joystick()
                elif self.option == 2:
                    globes.Globals.STATE = volume.Volume(True)
                if self.option == 3:
                    globes.Globals.STATE = menu.Menu(True)
| 45.035714 | 79 | 0.490682 |
import state
import menu
import globes
import pygame
import joystick
import volume
class Options(state.State):
    """Options menu screen: clear high scores, joystick setup, volume and
    brightness, or return to the main menu."""
    TEXT = []
    BACKGROUND = None
    LEFT_MARGIN = None
    HEIGHTS = None
    def __init__(self, sound=False, option=0):
        """Set up the menu; `sound` skips restarting the title music and
        `option` picks the initially highlighted row."""
        state.State.__init__(self)
        if not sound:
            globes.play_music("title.ogg")
        if (Options.BACKGROUND is None):
            Options.BACKGROUND = pygame.image.load("bg/titlescreen.png")\
                .convert()
        self.option = option
        self.blink = 0  # frame counter 0-9; items are visible while < 7
        self.confirmation = False  # True while the confirm prompt is shown
        self.confirmed = 0  # 0 selects "No", 1 selects "Yes"
        # Shared class-level assets are rendered lazily, only once.
        if (len(Options.TEXT) == 0):
            Options.TEXT = [globes.Globals.FONT.render("Clear High Scores",
                            True, globes.BLACK),
                            globes.Globals.FONT.render("Setup Joystick",
                            True, globes.BLACK),
                            globes.Globals.FONT.render("Volume & Brightness",
                            True, globes.BLACK),
                            globes.Globals.FONT.render("Return to Menu",
                            True, globes.BLACK)]
        if Options.LEFT_MARGIN is None:
            Options.LEFT_MARGIN = 2 * globes.Globals.WIDTH / 3
        if Options.HEIGHTS is None:
            Options.HEIGHTS = [
                (globes.Globals.HEIGHT / 2 - globes.Globals.HEIGHT / 8),
                globes.Globals.HEIGHT / 2 - globes.Globals.HEIGHT / 16,
                globes.Globals.HEIGHT / 2,
                globes.Globals.HEIGHT / 2 + globes.Globals.HEIGHT / 16,
                (globes.Globals.HEIGHT / 2 + globes.Globals.HEIGHT / 8)
            ]
    def render(self):
        """Draw the menu rows or the clear-scores confirmation prompt; the
        selected entry blinks (hidden while blink >= 7)."""
        globes.Globals.SCREEN.blit(Options.BACKGROUND, (0, 0))
        if not self.confirmation:
            for i in range(4):
                if ((not (self.option == i)) or self.blink < 7):
                    globes.Globals.SCREEN.blit(Options.TEXT[i],
                                               (Options.LEFT_MARGIN,
                                                Options.HEIGHTS[i]))
        else:
            surf = globes.Globals.FONT.render("Are you absolutely certain " +
                                              "you want to erase", True,
                                              globes.BLACK)
            globes.Globals.SCREEN.blit(surf, (270, 70))
            surf = globes.Globals.FONT.render("your legendary legacy?", True,
                                              globes.BLACK)
            globes.Globals.SCREEN.blit(surf, (370, 95))
            if self.blink < 7 or not self.confirmed == 1:
                surf = globes.Globals.FONT.render("Yes", True,
                                                  globes.BLACK)
                globes.Globals.SCREEN.blit(surf, (430, 130))
            if self.blink < 7 or not self.confirmed == 0:
                surf = globes.Globals.FONT.render("No", True,
                                                  globes.BLACK)
                globes.Globals.SCREEN.blit(surf, (530, 130))
    def update(self, time):
        """Advance the blink counter one step per frame, modulo 10."""
        self.blink = (self.blink + 1) % 10
    def event(self, event):
        """Route keyboard input to the menu or the confirmation prompt."""
        if self.confirmation:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    self.confirmed = (self.confirmed - 1) % 2
                elif event.key == pygame.K_RIGHT:
                    self.confirmed = (self.confirmed + 1) % 2
                if event.key == pygame.K_SPACE or event.key == pygame.K_RETURN:
                    if self.confirmed:
                        globes.Globals.HIGHSCORES.clear_file()
                        self.confirmation = False
                    else:
                        self.confirmation = False
        else:
            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                globes.Globals.STATE = menu.Menu(True)
            if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
                self.option = (self.option - 1) % 4
            if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
                self.option = (self.option + 1) % 4
            if event.type == pygame.KEYDOWN and \
               (event.key == pygame.K_SPACE or
                event.key == pygame.K_RETURN):
                if self.option == 0:
                    self.confirmation = True
                elif self.option == 1:
                    globes.Globals.STATE = joystick.Joystick()
                elif self.option == 2:
                    globes.Globals.STATE = volume.Volume(True)
                if self.option == 3:
                    globes.Globals.STATE = menu.Menu(True)
| true | true |
f71ad5d930f9670c92bb44ed061e73d50e006900 | 1,532 | py | Python | tests/functional/test_lock.py | box/etcdb | 0f27846a0ca13efff9750b97a38939f66172debc | [
"Apache-2.0"
] | 12 | 2016-10-25T18:03:49.000Z | 2019-06-27T13:20:22.000Z | tests/functional/test_lock.py | box/etcdb | 0f27846a0ca13efff9750b97a38939f66172debc | [
"Apache-2.0"
] | 30 | 2016-10-20T23:27:09.000Z | 2018-12-06T17:23:59.000Z | tests/functional/test_lock.py | box/etcdb | 0f27846a0ca13efff9750b97a38939f66172debc | [
"Apache-2.0"
] | 4 | 2016-10-20T23:24:48.000Z | 2022-03-01T09:59:29.000Z | import pytest
from etcdb import OperationalError
from etcdb.lock import Lock, ReadLock, WriteLock
def test_readers(etcdb_connection):
    """A read lock registers one reader per holder and clears on release."""
    cur = etcdb_connection.cursor()
    cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
    lock = ReadLock(etcdb_connection.client, 'foo', 'bar')
    # ttl=0: the lock does not expire on its own.
    lock.acquire(ttl=0)
    readers = lock.readers()
    lock.release()
    assert len(readers) == 1
    readers = lock.readers()
    assert len(readers) == 0
    # Two concurrent read locks may coexist; both must be listed.
    lock.acquire(ttl=0)
    l2 = ReadLock(etcdb_connection.client, 'foo', 'bar')
    l2.acquire(ttl=0)
    readers = lock.readers()
    assert len(readers) == 2
def test_writers(etcdb_connection):
    """A write lock is exclusive: a second writer cannot be acquired."""
    cur = etcdb_connection.cursor()
    cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
    lock = WriteLock(etcdb_connection.client, 'foo', 'bar')
    lock.acquire(ttl=0)
    writers = lock.writers()
    assert len(writers) == 1
    lock.release()
    writers = lock.writers()
    assert len(writers) == 0
    lock.acquire(ttl=0)
    l2 = WriteLock(etcdb_connection.client, 'foo', 'bar')
    # Acquiring a second writer on the same table must raise.
    with pytest.raises(OperationalError):
        l2.acquire()
def test_attributes(etcdb_connection):
    """Locks expose who acquired them, why, and when."""
    cur = etcdb_connection.cursor()
    cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
    lock = WriteLock(etcdb_connection.client, 'foo', 'bar')
    lock.acquire(author='author foo', reason='reason foo')
    assert lock.author == 'author foo'
    assert lock.reason == 'reason foo'
    # created_at is a positive (epoch-based) integer timestamp.
    assert type(lock.created_at) == int
    assert lock.created_at > 0
| 30.039216 | 64 | 0.679504 | import pytest
from etcdb import OperationalError
from etcdb.lock import Lock, ReadLock, WriteLock
def test_readers(etcdb_connection):
    """Read locks: one reader per holder; the list empties after release."""
    cur = etcdb_connection.cursor()
    cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
    lock = ReadLock(etcdb_connection.client, 'foo', 'bar')
    # ttl=0 keeps the lock from expiring on its own.
    lock.acquire(ttl=0)
    readers = lock.readers()
    lock.release()
    assert len(readers) == 1
    readers = lock.readers()
    assert len(readers) == 0
    # Multiple simultaneous readers are allowed and all are listed.
    lock.acquire(ttl=0)
    l2 = ReadLock(etcdb_connection.client, 'foo', 'bar')
    l2.acquire(ttl=0)
    readers = lock.readers()
    assert len(readers) == 2
def test_writers(etcdb_connection):
    """Write locks are exclusive; a second acquire must fail."""
    cur = etcdb_connection.cursor()
    cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
    lock = WriteLock(etcdb_connection.client, 'foo', 'bar')
    lock.acquire(ttl=0)
    writers = lock.writers()
    assert len(writers) == 1
    lock.release()
    writers = lock.writers()
    assert len(writers) == 0
    lock.acquire(ttl=0)
    l2 = WriteLock(etcdb_connection.client, 'foo', 'bar')
    # Second writer on the same table while the first is held: error.
    with pytest.raises(OperationalError):
        l2.acquire()
def test_attributes(etcdb_connection):
    """Acquired locks record their author, reason and creation time."""
    cur = etcdb_connection.cursor()
    cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
    lock = WriteLock(etcdb_connection.client, 'foo', 'bar')
    lock.acquire(author='author foo', reason='reason foo')
    assert lock.author == 'author foo'
    assert lock.reason == 'reason foo'
    # created_at must be a positive integer timestamp.
    assert type(lock.created_at) == int
    assert lock.created_at > 0
| true | true |
f71ad6efb1b2752e5513922cac67d1c668226597 | 6,403 | py | Python | django_mako_plus/uid.py | knowsuchagency/django-mako-plus | e737be6a2db6e9e897cc804c660494415c4ea180 | [
"Apache-2.0"
] | null | null | null | django_mako_plus/uid.py | knowsuchagency/django-mako-plus | e737be6a2db6e9e897cc804c660494415c4ea180 | [
"Apache-2.0"
] | null | null | null | django_mako_plus/uid.py | knowsuchagency/django-mako-plus | e737be6a2db6e9e897cc804c660494415c4ea180 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
'''
Created by Conan Albrecht <doconix@gmail.com>
Apache open source license.
November, 2017
'''
##################################################
### Unique id generator. Similar to uuid1() but
### also includes the process id.
###
### Note that upping the counter requires a global lock.
###
### The bit assignment:
###
### 52 bits for the timestamp since epoch (the code currently stores whole seconds; a microsecond variant is commented out; the field sits on the left of the number so it can in fact grow without limit)
### 16 bits for counter
### 48 bits for machine id
### 24 bits for process id
### ========
### 140 bits total, or 35 hex characters
###
### Maximum number is 1.39e42
###
import uuid
import time as time
import os
import random
import threading
import math
import collections
# initial values/constants
lastnow = 0                                # last timestamp handed out (epoch seconds)
counterstart = random.getrandbits(16) - 1  # random 16-bit seed so ids don't start at 0
countermax = math.pow(2, 16) - 1           # counter wraps at 2**16 - 1 (NB: this is a float)
counter = counterstart
# returns a 48 bit number
machineid = uuid.getnode()
# linux is usually 16 bits
processid = os.getpid()
# the main data structure: the four fields of a unique id
UID = collections.namedtuple('UID', ( 'time', 'counter', 'machine', 'process' ))
# binary size of each number
# and binary positions for shifting
# and for splitting hex and binary (from right side)
size = UID(52, 16, 48, 24)
# shift.X = total bits occupied by field X and everything to its right,
# i.e. how far the field to the *left* of X is shifted when packing.
_shift = []
for i in reversed(range(len(size))):
    _shift.append(sum(size[i:]))
shift = UID(*reversed(_shift))
hsplit = UID(*(int(s/-4) for s in shift))  # negative hex-digit offsets (4 bits per digit)
bsplit = UID(*(s*-1 for s in shift))       # negative bit offsets for binary strings
######################################
### Main API
# Module-level lock shared by all callers.  (The original code created a
# brand-new RLock on every call, which synchronized nothing.)
_uid_lock = threading.RLock()


def ruid():
    '''
    Create a "raw" unique id.

    Returns a UID namedtuple with four parts:

        time     - whole seconds since the epoch at generation time
        counter  - 16-bit rolling counter, randomly seeded at import
        machine  - 48-bit machine id (uuid.getnode())
        process  - id of the generating process

    All other functions in this module just format the id created here.
    '''
    # BUG FIX: the original declared `counter_start` global (a typo for
    # `counterstart`, which is never assigned and so needs no declaration).
    global lastnow, counter

    # The timestamp/counter pair must be updated atomically so two threads
    # can never observe the same (time, counter) combination.
    with _uid_lock:
        now = int(time.time())
        counter += 1
        if counter >= countermax:
            counter = 0
        # If the counter wrapped a full cycle within one clock tick, wait
        # for the clock to advance so the id stays unique.
        while now == lastnow and counter == counterstart:
            time.sleep(.001)  # wait a millisecond and try again
            now = int(time.time())
        lastnow = now

    return UID(now, counter, machineid, processid)
def iuid(raw=None):
    '''
    Return the unique id as a single integer.

    If provided, *raw* should be a UID namedtuple (usually from a call
    to ruid()); otherwise a fresh id is generated.
    '''
    if raw is None:
        raw = ruid()
    # Pack the four fields left-to-right at their bit offsets.
    offsets = (shift.counter, shift.machine, shift.process, 0)
    return sum(value << bits for value, bits in zip(raw, offsets))
def uid(raw=None, sep=None):
    '''
    Return the unique id as a hex string.

    If provided, *raw* should be a UID namedtuple (usually from a call
    to ruid()).  Pass sep='-' to separate the four parts with dashes.
    '''
    if raw is None:
        raw = ruid()
    hexed = '{:0x}'.format(iuid(raw))
    if sep is None:
        return hexed
    # Pretty version: cut the hex string at the field boundaries.
    pieces = (
        hexed[:hsplit.counter],
        hexed[hsplit.counter: hsplit.machine],
        hexed[hsplit.machine: hsplit.process],
        hexed[hsplit.process:],
    )
    return sep.join(pieces)
def buid(raw=None, sep=None):
    '''
    Return the unique id as a binary string.

    If provided, *raw* should be a UID namedtuple (usually from a call
    to ruid()).  Pass sep='-' to separate the four parts with dashes.
    '''
    if raw is None:
        raw = ruid()
    bitstring = '{:0b}'.format(iuid(raw))
    if sep is None:
        return bitstring
    # Pretty version: cut the bit string at the field boundaries.
    pieces = (
        bitstring[:bsplit.counter],
        bitstring[bsplit.counter: bsplit.machine],
        bitstring[bsplit.machine: bsplit.process],
        bitstring[bsplit.process:],
    )
    return sep.join(pieces)
def wuid(raw=None, leading='u'):
    '''
    Create a unique id usable as a web (HTML) id attribute.

    This is the plain hex id from uid(), prefixed with *leading*
    (default 'u') so the id starts with an alphabetical character, as
    the HTML standard requires.

    If provided, raw should be a UID named tuple
    (usually from a call to ruid).
    '''
    if raw is None:
        raw = ruid()
    return '{}{}'.format(leading, uid(raw))
def iunpack(n):
    '''
    Split an integer unique id back into a UID namedtuple.
    '''
    # Equivalent to masking each field in place then shifting down:
    # shift the field to bit 0 first, then mask off its width.
    counter_mask = (1 << size.counter) - 1
    machine_mask = (1 << size.machine) - 1
    process_mask = (1 << shift.process) - 1
    return UID(
        n >> shift.counter,
        (n >> shift.machine) & counter_mask,
        (n >> shift.process) & machine_mask,
        n & process_mask,
    )
def unpack(hex_n):
    '''
    Split a hex-string unique id back into a UID namedtuple.

    To unpack a web id produced by wuid(), strip the leading character
    first: unpack(myid[1:]).
    '''
    value = int(hex_n, 16)
    return iunpack(value)
###################################################
### Unit tests for this module:
###
### python3 uid.py
###
import unittest
class Tester(unittest.TestCase):
    """Unit tests for the uid module (run with: python3 uid.py)."""

    def test_ruid(self):
        # Ids from the same interpreter share machine and process parts.
        u = ruid()
        u2 = ruid()
        self.assertEqual(u.machine, u2.machine)
        self.assertEqual(u.process, u2.process)

    def test_int_hex_binary(self):
        # BUG FIX: this test was defined twice with identical bodies; the
        # second definition silently shadowed the first. Duplicate removed.
        u = ruid()
        n = iuid(u)
        h = uid(u)
        b = buid(u)
        self.assertEqual(n, int(h, 16))
        self.assertEqual(n, int(b, 2))

    def test_pretty(self):
        # The pretty (separated) forms differ only by the separator.
        u = ruid()
        # hex
        h = uid(u)
        p = uid(u, '-')
        self.assertEqual(h, p.replace('-', ''))
        # binary
        b = buid(u)
        p = buid(u, '-')
        self.assertEqual(b, p.replace('-', ''))

    def test_unpack(self):
        # round-trip raw -> formatted -> raw
        u = ruid()
        self.assertEqual(u, unpack(uid(u)))
        self.assertEqual(u, iunpack(iuid(u)))
        # other direction with int
        n = iuid()
        self.assertEqual(n, iuid(iunpack(n)))
        # other direction with hex
        h = uid()
        self.assertEqual(h, uid(unpack(h)))
if __name__ == '__main__':
unittest.main()
| 25.109804 | 144 | 0.558176 | true | true | |
f71ad714eec52284cdcd59b0da289a9e2213538e | 646 | py | Python | data_source.py | ReimuYk/aws-line-counter | ce6131b8a2f3c9b9e70b8496e3f9ce1cfdd64804 | [
"MIT"
] | null | null | null | data_source.py | ReimuYk/aws-line-counter | ce6131b8a2f3c9b9e70b8496e3f9ce1cfdd64804 | [
"MIT"
] | null | null | null | data_source.py | ReimuYk/aws-line-counter | ce6131b8a2f3c9b9e70b8496e3f9ce1cfdd64804 | [
"MIT"
] | null | null | null | import requests
import os
import time
def get_page(i):
    """Fetch page *i* of the AWS Serverless Application Repository listing
    and return the decoded JSON payload as a dict.
    """
    url = r'https://shr32taah3.execute-api.us-east-1.amazonaws.com/Prod/applications/browse?pageSize=12&pageNumber=%d&searchText=&category=&runtime=&verified=&sortFields='
    page = requests.get(url % i)
    # SECURITY FIX: the original ran eval() on the response body (after
    # patching "true"/"false"), which executes arbitrary code from an
    # untrusted network source. Parse the JSON properly instead.
    return page.json()
# Print deployment count, name and home page for the first three pages.
# (The original fetched page 3 here first and immediately discarded the
# result; that dead request has been removed.)
for i in range(1, 4):
    data = get_page(i)
    for item in data["applications"]:
        print(item["deploymentCount"], end="\t")
        print(item["name"])
        print(item["homePageUrl"])
        print()
        # os.popen("git clone " + item["homePageUrl"])
        # time.sleep(3)
| 32.3 | 171 | 0.645511 | import requests
import os
import time
def get_page(i):
url = r'https://shr32taah3.execute-api.us-east-1.amazonaws.com/Prod/applications/browse?pageSize=12&pageNumber=%d&searchText=&category=&runtime=&verified=&sortFields='
page = requests.get(url%i)
return eval(page.text.replace("true", "True").replace("false", "False"))
data = get_page(3)
for i in range(1, 4):
data = get_page(i)
for item in data["applications"]:
print(item["deploymentCount"], end="\t")
print(item["name"])
print(item["homePageUrl"])
print()
| true | true |
f71ad7b7135ea8f54cdfded058aa8b21c4b24595 | 762 | py | Python | learntools/core/multiproblem.py | bkmalayC/learntools | c739a1ee131caebcb9bbd8b138d51cff75152f3a | [
"Apache-2.0"
] | 1 | 2020-06-24T18:25:31.000Z | 2020-06-24T18:25:31.000Z | learntools/core/multiproblem.py | bkmalayC/learntools | c739a1ee131caebcb9bbd8b138d51cff75152f3a | [
"Apache-2.0"
] | null | null | null | learntools/core/multiproblem.py | bkmalayC/learntools | c739a1ee131caebcb9bbd8b138d51cff75152f3a | [
"Apache-2.0"
] | null | null | null | class MultipartProblem:
"""A container for multiple related Problems grouped together in one
question. If q1 is a MPP, its subquestions are accessed as q1.a, q1.b, etc.
"""
def __init__(self, *probs):
self.problems = probs
# TODO: This should be ordered.
self._prob_map = {}
def _repr_markdown_(self):
return repr(self)
def __repr__(self):
varname = self._varname
part_names = ['`{}.{}`'.format(varname, letter) for letter in self._prob_map]
return """This question is in {} parts. Those parts can be accessed as {}.
For example, to get a hint about part a, you would type `{}.a.hint()`.""".format(
len(self._prob_map), ', '.join(part_names), varname
)
| 34.636364 | 85 | 0.616798 | class MultipartProblem:
def __init__(self, *probs):
self.problems = probs
self._prob_map = {}
def _repr_markdown_(self):
return repr(self)
def __repr__(self):
varname = self._varname
part_names = ['`{}.{}`'.format(varname, letter) for letter in self._prob_map]
return """This question is in {} parts. Those parts can be accessed as {}.
For example, to get a hint about part a, you would type `{}.a.hint()`.""".format(
len(self._prob_map), ', '.join(part_names), varname
)
| true | true |
f71ad7f78f34b02d7d0b0895dc72b58d6a84bcbe | 1,775 | py | Python | {{cookiecutter.repo_name}}/python/{{cookiecutter.package_name}}/api/__init__.py | havok2063/cookiecutter-marvin | 3e64169b865012c5a6099e71a91789770222e5b5 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.repo_name}}/python/{{cookiecutter.package_name}}/api/__init__.py | havok2063/cookiecutter-marvin | 3e64169b865012c5a6099e71a91789770222e5b5 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.repo_name}}/python/{{cookiecutter.package_name}}/api/__init__.py | havok2063/cookiecutter-marvin | 3e64169b865012c5a6099e71a91789770222e5b5 | [
"BSD-3-Clause"
] | null | null | null | # !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
from __future__ import print_function, division, absolute_import
from flask import request
def process_request(request=None, as_dict=None, param=None):
'''Generally process the request for POST or GET, and build a form dictionary
Parameters:
request (request):
HTTP request object containing POST or GET data
as_dict (bool):
Boolean indicating whether to return the data as a standard dict or not
param (str):
Parameter name to extract from the request form
Returns:
Dict or ImmutableMultiDict
'''
# get form data
if request.method == 'POST':
if not request.form:
# if data is content-type json
data = request.get_json()
else:
# if data is content-type form
data = request.form
elif request.method == 'GET':
data = request.args
else:
return {}
# # if no data at all, return nothing
if param and data:
return data.get(param, None)
# convert ImmutableMultiDict to dictionary (if get or post-form) or use dict if post-json
if as_dict:
if isinstance(data, dict):
form = data
else:
# use multidict lists and iterlists to group multiple values for same in key into list
try:
# py2.7
form = {key: val if len(val) > 1 else val[0] for key, val in data.iterlists()}
except AttributeError:
# py3.5
form = {key: val if len(val) > 1 else val[0] for key, val in data.lists()}
else:
form = data
return form
| 30.603448 | 98 | 0.579155 |
from __future__ import print_function, division, absolute_import
from flask import request
def process_request(request=None, as_dict=None, param=None):
if request.method == 'POST':
if not request.form:
data = request.get_json()
else:
data = request.form
elif request.method == 'GET':
data = request.args
else:
return {}
urn data.get(param, None)
if as_dict:
if isinstance(data, dict):
form = data
else:
try:
form = {key: val if len(val) > 1 else val[0] for key, val in data.iterlists()}
except AttributeError:
form = {key: val if len(val) > 1 else val[0] for key, val in data.lists()}
else:
form = data
return form
| true | true |
f71ad891414e822ed9504c04e3c021fb01b0b6e5 | 13,313 | py | Python | negative_inline_editor/templatetags/negative_inline_edit.py | negative-space/negative-inline-editor | dde28b9ada65b81cb996bb9197826df45b67c48b | [
"MIT"
] | null | null | null | negative_inline_editor/templatetags/negative_inline_edit.py | negative-space/negative-inline-editor | dde28b9ada65b81cb996bb9197826df45b67c48b | [
"MIT"
] | null | null | null | negative_inline_editor/templatetags/negative_inline_edit.py | negative-space/negative-inline-editor | dde28b9ada65b81cb996bb9197826df45b67c48b | [
"MIT"
] | null | null | null | import os
import re
from django import template
from django.conf import settings
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import MultipleObjectsReturned
from django.urls import reverse
from django.db.models import Model
from django.template import Library, loader
from django.template.defaulttags import ForNode
from django.utils.module_loading import import_string
from django.utils.safestring import mark_safe
from django.utils.translation import get_language
register = Library()
def get_model_by_name(model_name):
    """Resolve an "app_label.ModelName" string to its model class."""
    from django.apps import apps
    app_label, short_name = model_name.split('.', 1)
    return apps.get_model(app_label, short_name)
class EditableNode(template.Node):
    """Template node emitted by the {% editable %} tag.

    Renders a model field's value; when the session has in-place editing
    enabled, wraps it in a contenteditable <span> carrying the data-*
    attributes the client-side editor uses to save changes.
    """

    def __init__(self, expr, field, html=False):
        self.expr = expr    # template expression resolving to a model instance
        self.field = field  # name of the field to render/edit
        self.html = html    # True when the field holds HTML (the @html flag)

    def render(self, context):
        expr = self.expr
        model = get_model_by_expr(context, expr)
        # Plain value when editing is off, or on but not in-place.
        if not context['request'].session.get('editable'):
            return getattr(model, self.field, None)
        if not context['request'].session.get('editable_inplace'):
            return getattr(model, self.field, None)
        model_cls = '{}.{}'.format(model._meta.app_label, model.__class__.__name__)
        save_url = reverse('update_model')
        # The JS editor reads the data-* attributes to know which
        # model/pk/field to POST back to save_url.
        return '<span contenteditable="true" data-html="{}" class="editable-model" data-editable-model="{}" data-editable-pk="{}" ' \
               'data-editable-field="{}" data-save-url="{}">{}</span>'.format(
            'true' if self.html else 'false', model_cls, model.pk, self.field, save_url,
            getattr(model, self.field, None)
        )
def get_model_by_expr(context, expr):
    """Resolve *expr* in the template context and validate that it is a
    saved Django model instance; raise ValueError otherwise."""
    resolved = template.Variable(expr).resolve(context)
    if not isinstance(resolved, Model):
        message = 'Left part of expression "{}" do not evaluate to Django model: {}'.format(
            expr,
            repr(resolved)
        )
        raise ValueError(message)
    if resolved.pk is None:
        message = 'Left part of expression "{}" evaluated to model that have no primary key. Not saved? {}'.format(
            expr,
            repr(resolved)
        )
        raise ValueError(message)
    return resolved
@register.tag
def editable(parser, token):
    """Parse {% editable model->field [@html] %}.

    Produces an EditableNode that renders the field, editable in place
    when the session allows it.
    """
    bits = token.split_contents()
    tag_name = token.contents.split()[0]
    if len(bits) < 2 or len(bits) > 3:
        raise template.TemplateSyntaxError(
            "%r tag requires at least single argument" % tag_name
        )
    expr = bits[1]
    if len(bits) == 3:
        if bits[2] != '@html':
            # Improved message: the original repeated the argument-count
            # text here, which did not describe this failure.
            raise template.TemplateSyntaxError(
                "%r tag's optional second argument must be @html" % tag_name
            )
        html = True
    else:
        html = False
    if '->' not in expr:
        # BUG FIX: the original referenced an undefined name `tag_name`
        # here, raising NameError instead of a TemplateSyntaxError.
        raise template.TemplateSyntaxError(
            "%r tag's argument should be expression in form: model->field" % tag_name
        )
    expr, field = [x.strip() for x in expr.split('->')]
    return EditableNode(expr, field, html=html)
class EditablePanel(object):
    """Descriptor of one panel in the inline-editing sidebar: either a
    list panel (model + related field + "add" button) or a single-object
    form panel (form_style=True)."""

    def __init__(self, name, model, field=None, add_btn=None, form_style=False) -> None:
        super().__init__()
        self.name = name
        self.model = model
        self.field = field        # related-manager field name (list panels only)
        self.add_btn = add_btn    # pre-rendered "add item" button HTML (list panels only)
        self.form_style = form_style
        self.model_cls = type(model)
        try:
            self.admin_cls = admin.site._registry[self.model_cls]
            content_type = ContentType.objects.get_for_model(self.model_cls)  # .__class__
            self.model_admin_url = reverse("admin:%s_%s_changelist" % (content_type.app_label, content_type.model)) \
                                   + str(model.id) + '/change/'
        # BUG FIX: admin.site._registry is a dict, so an unregistered model
        # raises KeyError; the original caught IndexError, which can never
        # fire here, letting the KeyError propagate.
        except KeyError:
            self.admin_cls = None
        if not form_style:
            assert self.field
            assert self.add_btn

    @property
    def items(self):
        """Related objects listed in the panel, with their verbose names."""
        return [{'obj': x, 'cls': x._meta.verbose_name} for x in getattr(self.model, self.field).all()]
class ForWrappingNode(template.Node):
    """Wraps a standard ForNode so the rendered list can be managed by the
    inline editor (add/sort buttons) and/or exposed as a sidebar panel."""

    def __init__(self, for_node, expr, field, panel_edit, inline_edit, alias):
        self.for_node = for_node        # the wrapped {% for %} node
        self.expr = expr                # expression resolving to the parent model
        self.field = field              # related-manager field iterated over
        self.panel_edit = panel_edit    # register a sidebar panel?
        self.inline_edit = inline_edit  # emit the inline "add" button?
        self.alias = alias              # optional panel-name override

    def render(self, context):
        # Outside an editing session, behave exactly like a plain for-loop.
        if not context['request'].session.get('editable'):
            return self.for_node.render(context)
        model = get_model_by_expr(context, self.expr)
        model_cls = '{}.{}'.format(model._meta.app_label, model.__class__.__name__)
        related = get_model_by_name(model_cls)._meta.get_field(self.field)
        content_type = ContentType.objects.get_for_model(related.related_model)  # .__class__
        model_admin_url = reverse("admin:%s_%s_changelist" % (content_type.app_label, content_type.model))
        update_sort_url = reverse('update_sort')
        rendered_for = self.for_node.render(context)
        # Button carrying everything the JS editor needs to add/sort items.
        add_btn = f'<a id="editable-{model_cls}-{model.pk}" class="editable-list-btn" data-editable-model="{model_cls}" data-editable-pk="{model.pk}" ' \
                  f'data-editable-field="{self.field}" data-editable-related-field="{related.field.name}" data-related-admin-url="{model_admin_url}" data-update-sort-url="{update_sort_url}"></a>'
        if self.panel_edit:
            # Lazily create the per-request panel registry.
            if not hasattr(context['request'], 'editable_panels'):
                context['request'].editable_panels = {}
            panel_name = self.alias or related.related_name
            context['request'].editable_panels[panel_name] = EditablePanel(
                name=panel_name,
                model=model,
                field=self.field,
                add_btn=add_btn
            )
        if not self.inline_edit or not context['request'].session.get('editable_inplace'):
            return rendered_for
        return add_btn + rendered_for
@register.tag(name='editable-related')
def editable_list(parser, token):
    """Parse {% editable-related x in model->field [reversed] [@panel|@panel_only[(name)]] %}.

    Works like {% for %} over model.field.all(), but wraps the loop in a
    ForWrappingNode so the inline editor can add/sort related objects
    and/or expose them in a sidebar panel.
    """
    bits = token.split_contents()
    panel_edit = False
    inline_edit = True
    alias = None
    # Optional trailing modifier: @panel or @panel_only, optionally with a
    # panel-name alias, e.g. @panel(my_name).
    panel_expr = re.match('^@(panel|panel_only)(\(([^\)]+)\))?$', bits[-1])
    if panel_expr:
        bits = bits[0:-1]
        panel_type = panel_expr.group(1)
        alias = panel_expr.group(3)
        if panel_type == 'panel':
            panel_edit = True
        elif panel_type == 'panel_only':
            # panel_only: sidebar panel without the inline add button.
            inline_edit = False
            panel_edit = True
    if len(bits) < 4:
        raise template.TemplateSyntaxError("'editable-list' statements should have at least four"
                                           " words: %s" % token.contents)
    # Same argument layout as Django's {% for %}: optional 'reversed' last.
    is_reversed = bits[-1] == 'reversed'
    in_index = -3 if is_reversed else -2
    if bits[in_index] != 'in':
        raise template.TemplateSyntaxError("'for' statements should use the format"
                                           " 'for x in y': %s" % token.contents)
    loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
    for var in loopvars:
        if not var or ' ' in var:
            raise template.TemplateSyntaxError("'for' tag received an invalid argument:"
                                               " %s" % token.contents)
    raw_expr = bits[in_index + 1]
    expr, field = [x.strip() for x in raw_expr.split('->')]
    # Iterate the related manager: {expr}.{field}.all
    sequence = parser.compile_filter('{}.{}.all'.format(expr, field))
    nodelist_loop = parser.parse(('end-editable-related',))
    token = parser.next_token()
    # An {% empty %} branch is not currently supported:
    # if token.contents == 'empty':
    #     nodelist_empty = parser.parse(('endfor',))
    #     parser.delete_first_token()
    # else:
    nodelist_empty = None
    return ForWrappingNode(
        ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty),
        expr,
        field,
        panel_edit=panel_edit,
        inline_edit=inline_edit,
        alias=alias
    )
@register.simple_tag(takes_context=True, name='_')
def translate_inline(context, value):
    """Template tag {% _ "string" %}: translate *value*, and when in-place
    editing is active wrap it in a contenteditable span bound to its
    StringTranslation record."""
    # Imported lazily to avoid import-time coupling to the i18n app.
    from negative_i18n.models import StringTranslation
    from negative_i18n.trans_utils import translate_lazy
    if 'request' in context and context['request'].session.get('editable'):
        # Collect every translatable string seen during the render so the
        # editing panel can list them (unless collection is disabled).
        if 'disable_i18n_collect' not in context:
            if not hasattr(context['request'], 'editable_strings'):
                context['request'].editable_strings = set()
            context['request'].editable_strings.add(value)
    if not 'request' in context or not context['request'].session.get('editable_inplace'):
        return translate_lazy(value)
    try:
        obj, created = StringTranslation.objects.get_or_create(key=value)
    except MultipleObjectsReturned:
        # Deduplicate: keep the first translation row, delete the rest.
        first = StringTranslation.objects.filter(key=value)[0]
        StringTranslation.objects.exclude(id=first.id).filter(key=value).delete()
        obj, created = first, False
    save_url = reverse('update_model')
    # Same editable-span contract as EditableNode, bound to the
    # StringTranslation's `translation` field.
    return mark_safe(
        '<span contenteditable="true" class="editable-model" data-editable-model="{}" data-editable-pk="{}" ' \
        'data-editable-field="{}" data-save-url="{}">{}</span>'.format(
            'negative_i18n.StringTranslation', obj.pk, 'translation', save_url, obj.translation or value
        ))
class EditModelNode(template.Node):
    """Node for {% editable-model %}: registers a form-style sidebar panel
    for a single model instance when an editing session is active."""

    def __init__(self, expr, alias):
        self.expr = expr
        self.alias = alias

    def render(self, context):
        request = context['request']
        if not request.session.get('editable'):
            return ''
        instance = get_model_by_expr(context, self.expr)
        # Lazily create the per-request panel registry.
        if not hasattr(request, 'editable_panels'):
            request.editable_panels = {}
        panel_name = self.alias or instance._meta.verbose_name
        request.editable_panels[panel_name] = EditablePanel(
            name=panel_name,
            model=instance,
            form_style=True
        )
        return ''
@register.tag(name='editable-model')
def editable_model(parser, token):
    """Parse {% editable-model model [as alias] %}."""
    bits = token.split_contents()
    alias = None
    if len(bits) == 4 and bits[2] == 'as':
        alias = bits[3]
    elif len(bits) != 2:
        raise template.TemplateSyntaxError("'editable-model' statements should have at least two"
                                           " words: %s" % token.contents)
    return EditModelNode(bits[1], alias=alias)
class InlineFormNode(template.Node):
    """Node for {% load_form %}: instantiates a form class (binding,
    validating and saving it on POST) and exposes it in the context."""

    def __init__(self, form_name, var_name):
        self.form_name = form_name  # dotted path to the form class
        self.var_name = var_name    # context variable name to publish as

    def render(self, context):
        request = context['request']
        form_cls = import_string(self.form_name)
        if request.method == 'POST':
            form = form_cls(request.POST)
            if form.is_valid():
                form.save()
            else:
                # NOTE(review): invalid POSTs are silently not saved; the
                # bound form (with its errors) is still placed in context.
                pass
        else:
            form = form_cls()
        context[self.var_name] = form
        return ''
@register.tag
def load_form(parser, token):
    """Parse {% load_form path.to.FormClass as var_name %}."""
    pieces = token.split_contents()
    if len(pieces) != 4 or pieces[2] != 'as':
        raise template.TemplateSyntaxError(
            "%r tag requires arguments in form: load_form form_name as var_name" % token.contents.split()[0]
        )
    return InlineFormNode(pieces[1], pieces[3])
class EditableWrapNode(template.Node):
    """Node for {% editable-wrap %}: renders the wrapped content and, for
    superusers with an active editing session, injects the editor wrapper
    div, the sidebar panel template and the inline editor CSS."""

    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        html = self.nodelist.render(context)
        # Only superusers ever see the editing chrome.
        if not context['request'].user.is_superuser:
            return html
        if context['request'].session.get('editable'):
            extra_class = ' open' if context['request'].GET.get('editableTab') else ''
            # NOTE(review): the nodelist is rendered a second time here, so
            # side effects in the wrapped content run twice — confirm intended.
            html = '<div class="cratis-editable-wrapper' + extra_class + '">' + self.nodelist.render(
                context) + '</div>'
        t = loader.get_template('editable-model/panel.html')
        context.push({'langs': settings.LANGUAGES, 'lang': get_language()})
        panel_html = t.render(context.flatten())
        # Inline the editor CSS (whitespace-collapsed) straight into the page.
        css_file = os.path.dirname(os.path.dirname(__file__)) + '/static/editable-model/editable-model.css'
        with open(css_file) as f:
            css_data = '<style>' + re.sub('\s+', ' ', f.read()) + '</style>'
        return css_data + html + panel_html
@register.tag(name='editable-wrap')
def editable_wrap(parser, token):
    """Parse {% editable-wrap %} ... {% end-editable-wrap %} (no arguments)."""
    if len(token.split_contents()) > 1:
        raise template.TemplateSyntaxError("'editable-wrap' statement do not accept arguments")
    body = parser.parse(('end-editable-wrap',))
    parser.next_token()  # consume the closing tag
    return EditableWrapNode(nodelist=body)
class WithViewContextNode(template.Node):
    """Node for {% load-view-context %}: instantiates a view class and
    merges its get_context_data() into the template context without
    overwriting existing variables."""

    def __init__(self, expr):
        self.expr = expr  # dotted path to the view class

    def render(self, context):
        view_cls = import_string(self.expr)
        view = view_cls(request=context['request'], kwargs={})
        extra = view.get_context_data()
        for name in extra:
            if name not in context:
                context[name] = extra[name]
        return ''
@register.tag(name='load-view-context')
def load_view_context(parser, token):
    """Parse {% load-view-context path.to.ViewClass %}."""
    pieces = token.split_contents()
    if len(pieces) != 2:
        raise template.TemplateSyntaxError("'load-view-context' requires argument")
    return WithViewContextNode(expr=pieces[1])
| 32.234867 | 189 | 0.621948 | import os
import re
from django import template
from django.conf import settings
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import MultipleObjectsReturned
from django.urls import reverse
from django.db.models import Model
from django.template import Library, loader
from django.template.defaulttags import ForNode
from django.utils.module_loading import import_string
from django.utils.safestring import mark_safe
from django.utils.translation import get_language
register = Library()
def get_model_by_name(model_name):
from django.apps import apps
app_name, model_name = model_name.split('.', 1)
return apps.get_model(app_name, model_name)
class EditableNode(template.Node):
def __init__(self, expr, field, html=False):
self.expr = expr
self.field = field
self.html = html
def render(self, context):
expr = self.expr
model = get_model_by_expr(context, expr)
if not context['request'].session.get('editable'):
return getattr(model, self.field, None)
if not context['request'].session.get('editable_inplace'):
return getattr(model, self.field, None)
model_cls = '{}.{}'.format(model._meta.app_label, model.__class__.__name__)
save_url = reverse('update_model')
return '<span contenteditable="true" data-html="{}" class="editable-model" data-editable-model="{}" data-editable-pk="{}" ' \
'data-editable-field="{}" data-save-url="{}">{}</span>'.format(
'true' if self.html else 'false', model_cls, model.pk, self.field, save_url,
getattr(model, self.field, None)
)
def get_model_by_expr(context, expr):
model = template.Variable(expr).resolve(context)
if not isinstance(model, Model):
raise ValueError('Left part of expression "{}" do not evaluate to Django model: {}'.format(
expr,
repr(model)
))
if model.pk is None:
raise ValueError(
'Left part of expression "{}" evaluated to model that have no primary key. Not saved? {}'.format(
expr,
repr(model)
))
return model
@register.tag
def editable(parser, token):
bits = token.split_contents()
if len(bits) < 2 or len(bits) > 3:
raise template.TemplateSyntaxError(
"%r tag requires at least single argument" % token.contents.split()[0]
)
expr = bits[1]
if len(bits) == 3:
if bits[2] != '@html':
raise template.TemplateSyntaxError(
"%r tag requires at least single argument" % token.contents.split()[0]
)
html = True
else:
html = False
if '->' not in expr:
raise template.TemplateSyntaxError(
"%r tag's argument should be expression in form: model->field" % tag_name
)
expr, field = [x.strip() for x in expr.split('->')]
return EditableNode(expr, field, html=html)
class EditablePanel(object):
def __init__(self, name, model, field=None, add_btn=None, form_style=False) -> None:
super().__init__()
self.name = name
self.model = model
self.field = field
self.add_btn = add_btn
self.form_style = form_style
self.model_cls = type(model)
try:
self.admin_cls = admin.site._registry[self.model_cls]
content_type = ContentType.objects.get_for_model(self.model_cls) # .__class__
self.model_admin_url = reverse("admin:%s_%s_changelist" % (content_type.app_label, content_type.model)) \
+ str(model.id) + '/change/'
except IndexError:
self.admin_cls = None
if not form_style:
assert self.field
assert self.add_btn
@property
def items(self):
return [{'obj': x, 'cls': x._meta.verbose_name} for x in getattr(self.model, self.field).all()]
class ForWrappingNode(template.Node):
def __init__(self, for_node, expr, field, panel_edit, inline_edit, alias):
self.for_node = for_node
self.expr = expr
self.field = field
self.panel_edit = panel_edit
self.inline_edit = inline_edit
self.alias = alias
def render(self, context):
if not context['request'].session.get('editable'):
return self.for_node.render(context)
model = get_model_by_expr(context, self.expr)
model_cls = '{}.{}'.format(model._meta.app_label, model.__class__.__name__)
related = get_model_by_name(model_cls)._meta.get_field(self.field)
content_type = ContentType.objects.get_for_model(related.related_model) # .__class__
model_admin_url = reverse("admin:%s_%s_changelist" % (content_type.app_label, content_type.model))
update_sort_url = reverse('update_sort')
rendered_for = self.for_node.render(context)
add_btn = f'<a id="editable-{model_cls}-{model.pk}" class="editable-list-btn" data-editable-model="{model_cls}" data-editable-pk="{model.pk}" ' \
f'data-editable-field="{self.field}" data-editable-related-field="{related.field.name}" data-related-admin-url="{model_admin_url}" data-update-sort-url="{update_sort_url}"></a>'
if self.panel_edit:
if not hasattr(context['request'], 'editable_panels'):
context['request'].editable_panels = {}
panel_name = self.alias or related.related_name
context['request'].editable_panels[panel_name] = EditablePanel(
name=panel_name,
model=model,
field=self.field,
add_btn=add_btn
)
if not self.inline_edit or not context['request'].session.get('editable_inplace'):
return rendered_for
return add_btn + rendered_for
@register.tag(name='editable-related')
def editable_list(parser, token):
bits = token.split_contents()
panel_edit = False
inline_edit = True
alias = None
panel_expr = re.match('^@(panel|panel_only)(\(([^\)]+)\))?$', bits[-1])
if panel_expr:
bits = bits[0:-1]
panel_type = panel_expr.group(1)
alias = panel_expr.group(3)
if panel_type == 'panel':
panel_edit = True
elif panel_type == 'panel_only':
inline_edit = False
panel_edit = True
if len(bits) < 4:
raise template.TemplateSyntaxError("'editable-list' statements should have at least four"
" words: %s" % token.contents)
is_reversed = bits[-1] == 'reversed'
in_index = -3 if is_reversed else -2
if bits[in_index] != 'in':
raise template.TemplateSyntaxError("'for' statements should use the format"
" 'for x in y': %s" % token.contents)
loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
for var in loopvars:
if not var or ' ' in var:
raise template.TemplateSyntaxError("'for' tag received an invalid argument:"
" %s" % token.contents)
raw_expr = bits[in_index + 1]
expr, field = [x.strip() for x in raw_expr.split('->')]
sequence = parser.compile_filter('{}.{}.all'.format(expr, field))
nodelist_loop = parser.parse(('end-editable-related',))
token = parser.next_token()
# if token.contents == 'empty':
# nodelist_empty = parser.parse(('endfor',))
# parser.delete_first_token()
# else:
nodelist_empty = None
return ForWrappingNode(
ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty),
expr,
field,
panel_edit=panel_edit,
inline_edit=inline_edit,
alias=alias
)
@register.simple_tag(takes_context=True, name='_')
def translate_inline(context, value):
from negative_i18n.models import StringTranslation
from negative_i18n.trans_utils import translate_lazy
if 'request' in context and context['request'].session.get('editable'):
if 'disable_i18n_collect' not in context:
if not hasattr(context['request'], 'editable_strings'):
context['request'].editable_strings = set()
context['request'].editable_strings.add(value)
if not 'request' in context or not context['request'].session.get('editable_inplace'):
return translate_lazy(value)
try:
obj, created = StringTranslation.objects.get_or_create(key=value)
except MultipleObjectsReturned:
first = StringTranslation.objects.filter(key=value)[0]
StringTranslation.objects.exclude(id=first.id).filter(key=value).delete()
obj, created = first, False
save_url = reverse('update_model')
return mark_safe(
'<span contenteditable="true" class="editable-model" data-editable-model="{}" data-editable-pk="{}" ' \
'data-editable-field="{}" data-save-url="{}">{}</span>'.format(
'negative_i18n.StringTranslation', obj.pk, 'translation', save_url, obj.translation or value
))
class EditModelNode(template.Node):
def __init__(self, expr, alias):
self.expr = expr
self.alias = alias
def render(self, context):
if not context['request'].session.get('editable'):
return ''
model = get_model_by_expr(context, self.expr)
if not hasattr(context['request'], 'editable_panels'):
context['request'].editable_panels = {}
panel_name = self.alias or model._meta.verbose_name
context['request'].editable_panels[panel_name] = EditablePanel(
name=panel_name,
model=model,
form_style=True
)
return ''
@register.tag(name='editable-model')
def editable_model(parser, token):
    """Parse ``{% editable-model expr %}`` or ``{% editable-model expr as alias %}``."""
    bits = token.split_contents()
    # Two tokens: no alias. Four tokens with 'as': explicit alias. Anything
    # else is a syntax error.
    if len(bits) == 2:
        return EditModelNode(bits[1], alias=None)
    if len(bits) == 4 and bits[2] == 'as':
        return EditModelNode(bits[1], alias=bits[3])
    raise template.TemplateSyntaxError("'editable-model' statements should have at least two"
                                       " words: %s" % token.contents)
class InlineFormNode(template.Node):
    """Instantiate a form class (by dotted path) and expose it in the context.

    On POST the form is bound to request.POST and saved when valid. An
    invalid form is still placed into the context — presumably so the
    template can render its errors (NOTE(review): the silent ``else: pass``
    looks deliberate, but confirm against the templates that use it).
    """
    def __init__(self, form_name, var_name):
        self.form_name = form_name  # dotted import path of the form class
        self.var_name = var_name    # context variable name to bind the form to
    def render(self, context):
        request = context['request']
        form_cls = import_string(self.form_name)
        if request.method == 'POST':
            form = form_cls(request.POST)
            if form.is_valid():
                form.save()
            else:
                pass
        else:
            # Non-POST requests get an unbound (empty) form.
            form = form_cls()
        context[self.var_name] = form
        return ''
@register.tag
def load_form(parser, token):
    """Parse ``{% load_form path.to.FormClass as var_name %}`` into an InlineFormNode."""
    bits = token.split_contents()
    # Exactly four tokens with 'as' in the third slot; anything else is a
    # syntax error (the original reached the same outcome via unpack errors).
    if len(bits) != 4 or bits[2] != 'as':
        raise template.TemplateSyntaxError(
            "%r tag requires arguments in form: load_form form_name as var_name" % token.contents.split()[0]
        )
    return InlineFormNode(bits[1], bits[3])
class EditableWrapNode(template.Node):
    """Wrap rendered content with the editable-mode chrome for superusers.

    Non-superusers get the wrapped content unchanged. For superusers the
    output is augmented with the edit panel and the inlined editable-model
    stylesheet; in editable sessions the content is additionally wrapped in
    a marker <div>.
    """
    def __init__(self, nodelist):
        self.nodelist = nodelist
    def render(self, context):
        html = self.nodelist.render(context)
        if not context['request'].user.is_superuser:
            return html
        if context['request'].session.get('editable'):
            extra_class = ' open' if context['request'].GET.get('editableTab') else ''
            # Reuse the already-rendered content instead of rendering the
            # nodelist a second time (the original double render repeated any
            # side effects and doubled the work).
            html = '<div class="cratis-editable-wrapper' + extra_class + '">' + html + '</div>'
        t = loader.get_template('editable-model/panel.html')
        context.push({'langs': settings.LANGUAGES, 'lang': get_language()})
        panel_html = t.render(context.flatten())
        css_file = os.path.dirname(os.path.dirname(__file__)) + '/static/editable-model/editable-model.css'
        with open(css_file) as f:
            # Raw string for the regex: '\s' in a plain literal is an invalid
            # escape sequence on modern Python.
            css_data = '<style>' + re.sub(r'\s+', ' ', f.read()) + '</style>'
        return css_data + html + panel_html
@register.tag(name='editable-wrap')
def editable_wrap(parser, token):
    """Parse ``{% editable-wrap %}...{% end-editable-wrap %}`` (no arguments)."""
    if len(token.split_contents()) > 1:
        raise template.TemplateSyntaxError("'editable-wrap' statement do not accept arguments")
    # Collect everything up to the closing tag, then consume the closing tag.
    wrapped = parser.parse(('end-editable-wrap',))
    parser.next_token()
    return EditableWrapNode(nodelist=wrapped)
class WithViewContextNode(template.Node):
    """Merge a class-based view's get_context_data() into the current context.

    Existing context keys win; only keys not already present are filled in.
    Renders nothing.
    """
    def __init__(self, expr):
        self.expr = expr  # dotted import path of the view class
    def render(self, context):
        cls = import_string(self.expr)
        # Instantiate the view directly; assumes the view only needs
        # ``request`` and ``kwargs`` — TODO confirm for views with url args.
        view = cls(request=context['request'], kwargs={})
        for key, val in view.get_context_data().items():
            if key not in context:
                context[key] = val
        return ''
@register.tag(name='load-view-context')
def load_view_context(parser, token):
    """Parse ``{% load-view-context 'dotted.path.ViewClass' %}``."""
    pieces = token.split_contents()
    if len(pieces) == 2:
        return WithViewContextNode(expr=pieces[1])
    raise template.TemplateSyntaxError("'load-view-context' requires argument")
| true | true |
f71ad930fc472c80200a4a8c8b4190aa61e62059 | 675 | py | Python | components/app/App_tpl.py | bitbuit/billterm | 553bf2afb6ff2c1e15becbe1b4ab59346e5a87b5 | [
"MIT"
] | null | null | null | components/app/App_tpl.py | bitbuit/billterm | 553bf2afb6ff2c1e15becbe1b4ab59346e5a87b5 | [
"MIT"
] | null | null | null | components/app/App_tpl.py | bitbuit/billterm | 553bf2afb6ff2c1e15becbe1b4ab59346e5a87b5 | [
"MIT"
] | null | null | null | from libs.Screen import *
class App_tpl(object):
  """Console output templates (banner, DB listing, help) for the app shell."""
  @staticmethod
  def hello():
    """Print the ASCII-art welcome banner and a hint about the help command."""
    # Raw strings keep the ASCII-art backslashes literal; plain literals here
    # contained invalid escape sequences ("\ ", "\_") that warn on modern
    # Python. The printed output is unchanged.
    print(" _ _ _ _ _ ")
    print(" | |__(_) | | |_ ___ _ _ _ __ ")
    print(r" | '_ \ | | | _/ -_) '_| ' \ ")
    print(r"__|_.__/_|_|_|\__\___|_| |_|_|_|_H_e_l_l_o_!")
    print(" ~ Write " + Text_style.BOLD + "help" + Text_style.END_STYLE + " if you are lost :$")
  @staticmethod
  def list_dbs(dbs):
    """Print every known database name, one blue-tinted line per entry."""
    print("Existing DBs")
    for db in dbs:
      Screen.render_line([ [db, "{:<20}", Text_style.BLUE] ])
  @staticmethod
  def help():
    """Print the (placeholder) help text."""
    print('helping people!')
| 30.681818 | 109 | 0.459259 | from libs.Screen import *
class App_tpl(object):
@staticmethod
def hello():
print(" _ _ _ _ _ ")
print(" | |__(_) | | |_ ___ _ _ _ __ ")
print(" | '_ \ | | | _/ -_) '_| ' \ ")
print("__|_.__/_|_|_|\__\___|_| |_|_|_|_H_e_l_l_o_!")
print(" ~ Write " + Text_style.BOLD + "help" + Text_style.END_STYLE + " if you are lost :$")
@staticmethod
def list_dbs(dbs):
print("Existing DBs")
for db in dbs:
Screen.render_line([ [db, "{:<20}", Text_style.BLUE] ])
@staticmethod
def help():
print('helping people!')
| true | true |
f71adab4390632d131f94912e04795cf9ddfadd8 | 408 | py | Python | tbx/people/migrations/0009_personpage_alt_short_intro.py | arush15june/wagtail-torchbox | c4d06e096c72bd8007975dc016133024f9d27fab | [
"MIT"
] | null | null | null | tbx/people/migrations/0009_personpage_alt_short_intro.py | arush15june/wagtail-torchbox | c4d06e096c72bd8007975dc016133024f9d27fab | [
"MIT"
] | null | null | null | tbx/people/migrations/0009_personpage_alt_short_intro.py | arush15june/wagtail-torchbox | c4d06e096c72bd8007975dc016133024f9d27fab | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-02-16 07:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the optional alternative short intro text field to PersonPage.
    dependencies = [
        ('people', '0008_personpage_short_intro'),
    ]
    operations = [
        migrations.AddField(
            model_name='personpage',
            name='alt_short_intro',
            # Nullable and blankable: existing rows need no backfill.
            field=models.TextField(blank=True, null=True),
        ),
    ]
| 21.473684 | 58 | 0.612745 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0008_personpage_short_intro'),
]
operations = [
migrations.AddField(
model_name='personpage',
name='alt_short_intro',
field=models.TextField(blank=True, null=True),
),
]
| true | true |
f71adb37c06a58da63ac0e0d5a07fe9ff030a284 | 2,941 | py | Python | southwest-headers.py | WGriffing/southwest-headers | 0dd4ac4e2ea50872638499b5b14673aa4b5fa60b | [
"MIT"
] | 12 | 2021-12-09T16:04:52.000Z | 2022-03-25T01:03:58.000Z | southwest-headers.py | WGriffing/southwest-headers | 0dd4ac4e2ea50872638499b5b14673aa4b5fa60b | [
"MIT"
] | 3 | 2021-12-13T00:04:53.000Z | 2022-01-02T06:37:13.000Z | southwest-headers.py | WGriffing/southwest-headers | 0dd4ac4e2ea50872638499b5b14673aa4b5fa60b | [
"MIT"
] | 2 | 2021-12-20T16:36:41.000Z | 2021-12-28T06:51:17.000Z | # this code is based on original work by @jasonwbarnett.
# https://github.com/pyro2927/SouthwestCheckin/issues/70#issuecomment-921166994
import json
import time
import re
import os
import random
import string
import sys
from pathlib import Path
from seleniumwire import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Build a throwaway, realistic-looking check-in identity: the request only
# needs to reach Southwest's API layer, not succeed.
confirmation_number = ''.join(random.choices(string.ascii_uppercase, k=6))
first_name = ''.join(random.choices(string.ascii_lowercase, k=random.randrange(4,10))).capitalize()
last_name = ''.join(random.choices(string.ascii_lowercase, k=random.randrange(4,10))).capitalize()
output_file = sys.argv[1] if len(sys.argv) > 1 else "southwest_headers.json"
chrome_options = Options()
chrome_options.headless = True
# the headless option adds HeadlessChrome to the user agent which causes southwest to return invalid headers. so need to set a user agent that appears like a normal web browser.
chrome_options.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36')
# fixes issue when user runs as root
# https://stackoverflow.com/questions/50642308/webdriverexception-unknown-error-devtoolsactiveport-file-doesnt-exist-while-t
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
# fixes issue if user doesn't have write permissions to default storage location
seleniumwire_options = { 'request_storage': 'memory' }
driver = webdriver.Chrome(os.getcwd() + "/chromedriver", options=chrome_options, seleniumwire_options=seleniumwire_options)
# Raw string: "\/" in a plain literal is an invalid escape sequence on
# modern Python; the regex itself is unchanged.
driver.scopes = [ r"page\/check-in" ] # only capture request URLs matching this regex
try:
    driver.get("https://mobile.southwest.com/check-in")
    # fill out the form once the form fields become available
    element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "recordLocator")))
    element.send_keys(confirmation_number)
    WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "firstName"))).send_keys(first_name)
    WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "lastName"))).send_keys(last_name)
    element.submit()
    # give the form time to submit before checking headers
    time.sleep(10)
    # content-type is a required header but not included in the request headers so we'll manually add it here.
    southwest_headers = { "content-type": "application/json" }
    headers = driver.requests[0].headers
    for key in headers:
        if re.match(r"x-api-key|x-user-experience-id|x-channel-id|^[\w-]+?-\w$", key, re.I):
            # only keep the headers we need
            southwest_headers[key] = headers[key]
    # save headers
    with open(output_file, "w") as json_file:
        json.dump(southwest_headers, json_file)
finally:
    # Always release the browser, even when the page flow or a wait fails —
    # the original leaked a headless Chrome process on any exception.
    driver.quit()
| 43.895522 | 177 | 0.782387 |
me
import re
import os
import random
import string
import sys
from pathlib import Path
from seleniumwire import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
confirmation_number = ''.join(random.choices(string.ascii_uppercase, k=6))
first_name = ''.join(random.choices(string.ascii_lowercase, k=random.randrange(4,10))).capitalize()
last_name = ''.join(random.choices(string.ascii_lowercase, k=random.randrange(4,10))).capitalize()
output_file = sys.argv[1] if len(sys.argv) > 1 else "southwest_headers.json"
chrome_options = Options()
chrome_options.headless = True
chrome_options.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
seleniumwire_options = { 'request_storage': 'memory' }
driver = webdriver.Chrome(os.getcwd() + "/chromedriver", options=chrome_options, seleniumwire_options=seleniumwire_options)
driver.scopes = [ "page\/check-in" ] # only capture request URLs matching this regex
driver.get("https://mobile.southwest.com/check-in")
# fill out the form once the form fields become available
element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "recordLocator")))
element.send_keys(confirmation_number)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "firstName"))).send_keys(first_name)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "lastName"))).send_keys(last_name)
element.submit()
# give the form time to submit before checking headers
time.sleep(10)
# content-type is a required header but not included in the request headers so we'll manually add it here.
southwest_headers = { "content-type": "application/json" }
headers = driver.requests[0].headers
for key in headers:
if re.match("x-api-key|x-user-experience-id|x-channel-id|^[\w-]+?-\w$", key, re.I):
southwest_headers[key] = headers[key]
with open(output_file, "w") as json_file:
json.dump(southwest_headers, json_file)
driver.quit()
| true | true |
f71adbc103a2cba26d96345692d6ef2e185b7c56 | 5,133 | py | Python | part_14/setfirmware.py | zcutlip/broken_abandoned | 76f2f8577de3c1a570e28f081349e6f22fb33a34 | [
"MIT"
] | 28 | 2015-05-02T22:19:26.000Z | 2021-04-26T20:01:00.000Z | part_14/setfirmware.py | zcutlip/broken_abandoned | 76f2f8577de3c1a570e28f081349e6f22fb33a34 | [
"MIT"
] | null | null | null | part_14/setfirmware.py | zcutlip/broken_abandoned | 76f2f8577de3c1a570e28f081349e6f22fb33a34 | [
"MIT"
] | 6 | 2015-11-03T08:12:43.000Z | 2018-08-19T21:48:18.000Z | #!/usr/bin/env python
# Copyright (c) 2015
# - Zachary Cutlip <uid000()gmail.com>
#
# See LICENSE for more details.
#
import sys
import socket
import time
import base64
from bowcaster.common import Logging
HOST="10.12.34.1"
#HOST="192.168.127.141"
class SetFirmwareRequest(object):
"""
Generate a "SetFirmware" SOAP request
Params
------
firmware_file: Optional. The name of a file to base64 encode into
the SOAP request. If no file is provided, a string
of As is used (unencoded) in its place.
logger: Optional. A Bowcaster Logging object. If a logger
is not provided, one will be instantiated.
"""
MIN_CONTENT_LENGTH=102401
def __init__(self,firmware_file=None,logger=None):
b64encode=True
if not logger:
logger=Logging(max_level=Logging.DEBUG)
if firmware_file:
logger.LOG_INFO("Reading firmware data from: %s" % firmware_file)
firmware_data=open(firmware_file,"rb").read()
else:
b64encode=False
logger.LOG_INFO("Generating padding of As in place of firmware data.")
firmware_data="A"*self.MIN_CONTENT_LENGTH
self.request_body=SetFirmwareBody(firmware_data,b64encode=b64encode,logger=logger)
content_length=len(self.request_body)
self.request_headers=SetFirmwareRequestHeaders(content_length)
def __str__(self):
return str(self.request_headers)+str(self.request_body)
class SetFirmwareRequestHeaders(object):
    """
    Class to generate the HTTP headers for a "SetFirmware" SOAP request.
    Params
    ------
    content_length: Value to specify for the Content-Length header.
    """
    # Fixed header template; only the Content-Length value varies per request.
    _TEMPLATE = (
        "POST /soap/server_sa/SetFirmware HTTP/1.1\r\n"
        "Accept-Encoding: identity\r\n"
        "Content-Length: %d\r\n"
        "Soapaction: \"urn:DeviceConfig\"\r\n"
        "Host: 127.0.0.1\r\n"
        "User-Agent: Python-urllib/2.7\r\n"
        "Connection: close\r\n"
        "Content-Type: text/xml ;charset=\"utf-8\"\r\n\r\n"
    )
    def __init__(self, content_length):
        # Substitute the body size into the template once, up front.
        self.headers = self._TEMPLATE % (content_length,)
    def __str__(self):
        return self.headers
class SetFirmwareBody(object):
    """
    Class to generate the body of a "SetFirmware" SOAP request
    Params
    ------
    firmware_data: Data to encapsulate in the request.
    b64encode:     Optional. Boolean flag whether to base64 encode firmware_data.
    logger:        Optional. A Bowcaster Logging object. If a logger
                   is not provided, one will be instantiated.
    """
    SOAP_REQUEST_START="<SOAP-ENV:Body><NewFirmware>"
    SOAP_REQUEST_END="</NewFirmware></SOAP-ENV:Body>"
    def __init__(self, firmware_data, b64encode=True, logger=None):
        if not logger:
            logger = Logging(max_level=Logging.DEBUG)
        self.logger = logger
        logger.LOG_DEBUG("Building SetFirmware request body.")
        logger.LOG_DEBUG("Length of firmware: %d" % len(firmware_data))
        # Either base64-encode the payload or embed it verbatim.
        self.encoded_firmware = (
            base64.b64encode(firmware_data) if b64encode else firmware_data
        )
        logger.LOG_DEBUG("Length of encoded firmware: %d" % len(self.encoded_firmware))
    def _assemble(self):
        # Single place that wraps the payload in the SOAP envelope tags.
        return self.SOAP_REQUEST_START + self.encoded_firmware + self.SOAP_REQUEST_END
    def __len__(self):
        return len(self._assemble())
    def __str__(self):
        return self._assemble()
def special_upnp_send(addr,port,data):
    """Send *data* to addr:port in two chunks with deliberate pauses.

    The 8190-byte split and the sleeps are timed around the target UPnP
    server's receive loop (see the inline notes left by the original
    author below). Purely side-effecting; returns None.
    """
    sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    sock.connect((addr,port))
    """only send first 8190 bytes of request"""
    sock.send(data[:8190])
    """sleep to ensure first recv()
    only gets this first chunk."""
    time.sleep(1)
    """Hopefully in upnp_receiv_firmware_packets()
    by now, so we can send the rest."""
    sock.send(data[8190:])
    """
    Sleep a bit more so server doesn't end up
    in an infinite select() loop.
    Select's timeout is set to 1 sec,
    so we need to give enough time
    for the loop to go back to select,
    and for the timeout to happen,
    returning an error."""
    time.sleep(10)
    sock.close()
def main(firmware_file=None):
    """Build a SetFirmware SOAP request and send it to HOST:5000.

    firmware_file: optional path of a firmware image to embed; when None a
                   generated padding payload is used instead.
    """
    logger=Logging(max_level=Logging.DEBUG)
    request=SetFirmwareRequest(firmware_file=firmware_file,logger=logger)
    # Render once and reuse — the original called str(request) twice.
    payload=str(request)
    #write out the request to a file so we can easily analyze what we sent.
    logger.LOG_DEBUG("Writing request to request.bin for analysis.")
    # Context manager guarantees the dump is flushed and closed (the original
    # leaked the file handle).
    with open("./request.bin","wb") as dump_file:
        dump_file.write(payload)
    logger.LOG_DEBUG("Done.")
    logger.LOG_INFO("Sending special UPnP request to host: %s" % HOST)
    special_upnp_send(HOST,5000,payload)
    logger.LOG_INFO("Done.")
if __name__ == "__main__":
try:
firmware_file=sys.argv[1]
except:
firmware_file=None
main(firmware_file)
| 32.903846 | 90 | 0.633742 |
import sys
import socket
import time
import base64
from bowcaster.common import Logging
HOST="10.12.34.1"
class SetFirmwareRequest(object):
MIN_CONTENT_LENGTH=102401
def __init__(self,firmware_file=None,logger=None):
b64encode=True
if not logger:
logger=Logging(max_level=Logging.DEBUG)
if firmware_file:
logger.LOG_INFO("Reading firmware data from: %s" % firmware_file)
firmware_data=open(firmware_file,"rb").read()
else:
b64encode=False
logger.LOG_INFO("Generating padding of As in place of firmware data.")
firmware_data="A"*self.MIN_CONTENT_LENGTH
self.request_body=SetFirmwareBody(firmware_data,b64encode=b64encode,logger=logger)
content_length=len(self.request_body)
self.request_headers=SetFirmwareRequestHeaders(content_length)
def __str__(self):
return str(self.request_headers)+str(self.request_body)
class SetFirmwareRequestHeaders(object):
def __init__(self,content_length):
headers="".join(["POST /soap/server_sa/SetFirmware HTTP/1.1\r\n",
"Accept-Encoding: identity\r\n",
"Content-Length: %d\r\n",
"Soapaction: \"urn:DeviceConfig\"\r\n",
"Host: 127.0.0.1\r\n",
"User-Agent: Python-urllib/2.7\r\n",
"Connection: close\r\n",
"Content-Type: text/xml ;charset=\"utf-8\"\r\n\r\n"])
self.headers=headers % (content_length)
def __str__(self):
return self.headers
class SetFirmwareBody(object):
SOAP_REQUEST_START="<SOAP-ENV:Body><NewFirmware>"
SOAP_REQUEST_END="</NewFirmware></SOAP-ENV:Body>"
def __init__(self,firmware_data,b64encode=True,logger=None):
if not logger:
logger=Logging(max_level=Logging.DEBUG)
self.logger=logger
logger.LOG_DEBUG("Building SetFirmware request body.")
logger.LOG_DEBUG("Length of firmware: %d" % len(firmware_data))
if b64encode:
self.encoded_firmware=base64.b64encode(firmware_data)
else:
self.encoded_firmware=firmware_data
logger.LOG_DEBUG("Length of encoded firmware: %d" % len(self.encoded_firmware))
def __len__(self):
return len(self.SOAP_REQUEST_START+self.encoded_firmware+self.SOAP_REQUEST_END)
def __str__(self):
return self.SOAP_REQUEST_START+self.encoded_firmware+self.SOAP_REQUEST_END
def special_upnp_send(addr,port,data):
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect((addr,port))
sock.send(data[:8190])
time.sleep(1)
sock.send(data[8190:])
time.sleep(10)
sock.close()
def main(firmware_file=None):
logger=Logging(max_level=Logging.DEBUG)
request=SetFirmwareRequest(firmware_file=firmware_file,logger=logger)
logger.LOG_DEBUG("Writing request to request.bin for analysis.")
open("./request.bin","wb").write(str(request))
logger.LOG_DEBUG("Done.")
logger.LOG_INFO("Sending special UPnP request to host: %s" % HOST)
special_upnp_send(HOST,5000,str(request))
logger.LOG_INFO("Done.")
if __name__ == "__main__":
try:
firmware_file=sys.argv[1]
except:
firmware_file=None
main(firmware_file)
| true | true |
f71adbd3d7f37d3e3cf4898f63bddfd194187306 | 4,593 | py | Python | DQMOffline/L1Trigger/test/runDQMOffline_step1_L1TStage2CaloLayer2_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | 1 | 2019-08-09T08:42:11.000Z | 2019-08-09T08:42:11.000Z | DQMOffline/L1Trigger/test/runDQMOffline_step1_L1TStage2CaloLayer2_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | DQMOffline/L1Trigger/test/runDQMOffline_step1_L1TStage2CaloLayer2_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | 1 | 2019-04-03T19:23:27.000Z | 2019-04-03T19:23:27.000Z | import os
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
from Configuration.StandardSequences.Eras import eras
def get_root_files(path):
    """Return 'file://' URLs for every .root file found directly in *path*."""
    return [
        'file://{0}'.format(os.path.join(path, name))
        for name in os.listdir(path)
        if name.endswith(".root")
    ]
# Command-line options: which sample to run over and where to write output.
options = VarParsing('analysis')
options.register(
    'sample',
    'TTJet',
    VarParsing.multiplicity.singleton,
    VarParsing.varType.string,
)
options.setDefault('maxEvents', 2000)
options.setDefault(
    'outputFile', 'L1TOffline_L1TStage2CaloLayer2_job1_RAW2DIGI_RECO_DQM.root')
options.parseArguments()
# Local input datasets, keyed by sample name.
inputFiles = {
    'TTJet': get_root_files('/data/TTJet/reco'),
    'DoubleEG': get_root_files('/data/DoubleEG'),
}
inputFilesRAW = {
    'TTJet': get_root_files('/data/TTJet/raw'),
}
process = cms.Process('L1TStage2EmulatorDQM', eras.Run2_2016)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load(
    'Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# load DQM
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
# Report progress roughly ten times over the whole job.
process.MessageLogger.cerr.FwkReport.reportEvery = int(options.maxEvents / 10)
process.maxEvents = cms.untracked.PSet(
    input=cms.untracked.int32(options.maxEvents)
)
# Input source
process.source = cms.Source(
    "PoolSource",
    fileNames=cms.untracked.vstring(inputFiles[options.sample]),
)
# Only the TTJet sample has matching RAW files available as secondaries.
if options.sample == 'TTJet':
    process.source.secondaryFileNames = cms.untracked.vstring(inputFilesRAW[
        'TTJet'])
process.options = cms.untracked.PSet(
)
# Output definition
process.DQMoutput = cms.OutputModule(
    "DQMRootOutputModule",
    fileName=cms.untracked.string(options.outputFile)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
# MC conditions for TTJet, data conditions for everything else.
if options.sample == 'TTJet':
    process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
else:
    process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.load('DQMOffline.L1Trigger.L1TEtSumJetOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TEGammaOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TTauOffline_cfi')
# DEBUG=1 in the environment turns on verbose MessageLogger output.
if os.environ.get('DEBUG', False):
    process.MessageLogger.cout.threshold = cms.untracked.string('DEBUG')
    process.MessageLogger.debugModules = cms.untracked.vstring(
        '*',
    )
# pfMETT1 from https://github.com/cms-sw/cmssw/blob/master/DQMOffline/JetMET/python/jetMETDQMOfflineSource_cff.py#L109,
# is difficult to set up, let's use pfMet for testing
process.l1tPFMetNoMuForDQM.pfMETCollection = 'pfMet'
process.dqmoffline_step = cms.Path(
    process.goodPFJetsForL1T *
    process.l1tPFMetNoMuForDQM *
    process.l1tEtSumJetOfflineDQMEmu +
    process.l1tEtSumJetOfflineDQM +
    process.l1tEGammaOfflineDQM +
    process.l1tEGammaOfflineDQMEmu +
    process.l1tTauOfflineDQM +
    process.l1tTauOfflineDQMEmu
)
# Emulator comparisons only make sense where re-emulation inputs exist (TTJet).
if options.sample != 'TTJet':
    process.dqmoffline_step.remove(process.l1tEtSumJetOfflineDQMEmu)
    process.dqmoffline_step.remove(process.l1tEGammaOfflineDQMEmu)
    process.dqmoffline_step.remove(process.l1tTauOfflineDQMEmu)
process.DQMoutput_step = cms.EndPath(process.DQMoutput)
# Schedule definition
process.schedule = cms.Schedule(
    process.raw2digi_step,
)
# customisation of the process.
# Automatic addition of the customisation function from
# L1Trigger.Configuration.customiseReEmul
from L1Trigger.Configuration.customiseReEmul import L1TReEmulFromRAW
# call to customisation function L1TReEmulFromRAW imported from
# L1Trigger.Configuration.customiseReEmul
# complains about
# AttributeError: 'Process' object has no attribute 'simRctDigis'
# process = L1TReEmulFromRAW(process)
process.schedule.append(process.dqmoffline_step)
process.schedule.append(process.DQMoutput_step)
| 32.34507 | 119 | 0.775963 | import os
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
from Configuration.StandardSequences.Eras import eras
def get_root_files(path):
files = os.listdir(path)
root_files = [f for f in files if f.endswith(".root")]
full_paths = [os.path.join(path, f) for f in root_files]
urls = ['file://{0}'.format(f) for f in full_paths]
return urls
options = VarParsing('analysis')
options.register(
'sample',
'TTJet',
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
)
options.setDefault('maxEvents', 2000)
options.setDefault(
'outputFile', 'L1TOffline_L1TStage2CaloLayer2_job1_RAW2DIGI_RECO_DQM.root')
options.parseArguments()
inputFiles = {
'TTJet': get_root_files('/data/TTJet/reco'),
'DoubleEG': get_root_files('/data/DoubleEG'),
}
inputFilesRAW = {
'TTJet': get_root_files('/data/TTJet/raw'),
}
process = cms.Process('L1TStage2EmulatorDQM', eras.Run2_2016)
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load(
'Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = int(options.maxEvents / 10)
process.maxEvents = cms.untracked.PSet(
input=cms.untracked.int32(options.maxEvents)
)
process.source = cms.Source(
"PoolSource",
fileNames=cms.untracked.vstring(inputFiles[options.sample]),
)
if options.sample == 'TTJet':
process.source.secondaryFileNames = cms.untracked.vstring(inputFilesRAW[
'TTJet'])
process.options = cms.untracked.PSet(
)
process.DQMoutput = cms.OutputModule(
"DQMRootOutputModule",
fileName=cms.untracked.string(options.outputFile)
)
from Configuration.AlCa.GlobalTag import GlobalTag
if options.sample == 'TTJet':
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
else:
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
process.raw2digi_step = cms.Path(process.RawToDigi)
process.load('DQMOffline.L1Trigger.L1TEtSumJetOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TEGammaOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TTauOffline_cfi')
if os.environ.get('DEBUG', False):
process.MessageLogger.cout.threshold = cms.untracked.string('DEBUG')
process.MessageLogger.debugModules = cms.untracked.vstring(
'*',
)
ess.l1tPFMetNoMuForDQM.pfMETCollection = 'pfMet'
process.dqmoffline_step = cms.Path(
process.goodPFJetsForL1T *
process.l1tPFMetNoMuForDQM *
process.l1tEtSumJetOfflineDQMEmu +
process.l1tEtSumJetOfflineDQM +
process.l1tEGammaOfflineDQM +
process.l1tEGammaOfflineDQMEmu +
process.l1tTauOfflineDQM +
process.l1tTauOfflineDQMEmu
)
if options.sample != 'TTJet':
process.dqmoffline_step.remove(process.l1tEtSumJetOfflineDQMEmu)
process.dqmoffline_step.remove(process.l1tEGammaOfflineDQMEmu)
process.dqmoffline_step.remove(process.l1tTauOfflineDQMEmu)
process.DQMoutput_step = cms.EndPath(process.DQMoutput)
# Schedule definition
process.schedule = cms.Schedule(
process.raw2digi_step,
)
# customisation of the process.
# Automatic addition of the customisation function from
# L1Trigger.Configuration.customiseReEmul
from L1Trigger.Configuration.customiseReEmul import L1TReEmulFromRAW
# call to customisation function L1TReEmulFromRAW imported from
# L1Trigger.Configuration.customiseReEmul
# complains about
# AttributeError: 'Process' object has no attribute 'simRctDigis'
# process = L1TReEmulFromRAW(process)
process.schedule.append(process.dqmoffline_step)
process.schedule.append(process.DQMoutput_step)
| true | true |
f71ade1259b4cf0d7e2485e0b9af1fdc780966f1 | 4,862 | py | Python | tests/core/test_tracker_stores.py | vinit134/rasa | 58c122fbf459c587fd947d48af5c76ae30bf39be | [
"Apache-2.0"
] | 3 | 2020-02-04T08:44:02.000Z | 2021-05-25T19:46:55.000Z | tests/core/test_tracker_stores.py | vinit134/rasa | 58c122fbf459c587fd947d48af5c76ae30bf39be | [
"Apache-2.0"
] | 21 | 2019-12-16T17:37:54.000Z | 2020-07-06T06:19:04.000Z | tests/core/test_tracker_stores.py | zijiannc/RASA_NLU | 4ffafb62b7414cabae07149533e01afe5fc26c14 | [
"Apache-2.0"
] | 1 | 2021-03-08T15:04:09.000Z | 2021-03-08T15:04:09.000Z | import pytest
from rasa.core.channels.channel import UserMessage
from rasa.core.domain import Domain
from rasa.core.events import SlotSet, ActionExecuted, Restarted
from rasa.core.tracker_store import (
TrackerStore,
InMemoryTrackerStore,
RedisTrackerStore,
SQLTrackerStore,
)
from rasa.utils.endpoints import EndpointConfig, read_endpoint_config
from tests.core.conftest import DEFAULT_ENDPOINTS_FILE
domain = Domain.load("data/test_domains/default.yml")
def test_get_or_create():
    """A slot value survives a save/retrieve round trip through the in-memory store."""
    store = InMemoryTrackerStore(domain)
    sender = UserMessage.DEFAULT_SENDER_ID
    tracker = store.get_or_create_tracker(sender)
    tracker.update(SlotSet("location", "Easter Island"))
    assert tracker.get_slot("location") == "Easter Island"
    store.save(tracker)
    reloaded = store.get_or_create_tracker(sender)
    assert reloaded.get_slot("location") == "Easter Island"
def test_restart_after_retrieval_from_tracker_store(default_domain):
    """The index of the latest restart survives a save/retrieve round trip."""
    store = InMemoryTrackerStore(default_domain)
    tr = store.get_or_create_tracker("myuser")
    # Four listen actions followed by a restart event.
    synth = [ActionExecuted("action_listen") for _ in range(4)]
    for e in synth:
        tr.update(e)
    tr.update(Restarted())
    latest_restart = tr.idx_after_latest_restart()
    store.save(tr)
    tr2 = store.retrieve("myuser")
    latest_restart_after_loading = tr2.idx_after_latest_restart()
    assert latest_restart == latest_restart_after_loading
def test_tracker_store_remembers_max_history(default_domain):
    """max_event_history set at creation is preserved through the store."""
    store = InMemoryTrackerStore(default_domain)
    original = store.get_or_create_tracker("myuser", max_event_history=42)
    original.update(Restarted())
    store.save(original)
    restored = store.retrieve("myuser")
    assert original._max_event_history == restored._max_event_history == 42
def test_tracker_store_endpoint_config_loading():
    """The tracker_store section of the default endpoints file parses as expected."""
    cfg = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
    # Must match the redis settings declared in DEFAULT_ENDPOINTS_FILE.
    assert cfg == EndpointConfig.from_dict(
        {
            "type": "redis",
            "url": "localhost",
            "port": 6379,
            "db": 0,
            "password": "password",
            "timeout": 30000,
        }
    )
def test_find_tracker_store(default_domain):
    """find_tracker_store resolves the redis endpoint config to a RedisTrackerStore."""
    store = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
    tracker_store = RedisTrackerStore(
        domain=default_domain,
        host="localhost",
        port=6379,
        db=0,
        password="password",
        record_exp=3000,
    )
    # NOTE(review): this only compares the resolved *type*, not the
    # connection parameters carried by the endpoint config.
    assert isinstance(
        tracker_store, type(TrackerStore.find_tracker_store(default_domain, store))
    )
class ExampleTrackerStore(RedisTrackerStore):
    """Minimal custom store used by the from-string resolution tests.

    Adapts the endpoint's ``url`` kwarg to RedisTrackerStore's ``host`` kwarg.
    """
    def __init__(self, domain, url, port, db, password, record_exp):
        super(ExampleTrackerStore, self).__init__(
            domain, host=url, port=port, db=db, password=password, record_exp=record_exp
        )
def test_tracker_store_from_string(default_domain):
    """A dotted module path in the endpoint config resolves to that store class."""
    config = read_endpoint_config(
        "data/test_endpoints/custom_tracker_endpoints.yml", "tracker_store"
    )
    resolved = TrackerStore.find_tracker_store(default_domain, config)
    assert isinstance(resolved, ExampleTrackerStore)
def test_tracker_store_from_invalid_module(default_domain):
    """An unimportable store type falls back to the in-memory store."""
    endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
    store_config = read_endpoint_config(endpoints_path, "tracker_store")
    store_config.type = "a.module.which.cannot.be.found"
    tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
    assert isinstance(tracker_store, InMemoryTrackerStore)
def test_tracker_store_from_invalid_string(default_domain):
    """A garbage store type string silently falls back to the in-memory store."""
    config = read_endpoint_config(
        "data/test_endpoints/custom_tracker_endpoints.yml", "tracker_store"
    )
    config.type = "any string"
    fallback = TrackerStore.find_tracker_store(default_domain, config)
    assert isinstance(fallback, InMemoryTrackerStore)
@pytest.mark.parametrize(
"full_url",
[
"postgresql://localhost",
"postgresql://localhost:5432",
"postgresql://user:secret@localhost",
],
)
def test_get_db_url_with_fully_specified_url(full_url):
assert SQLTrackerStore._get_db_url(host=full_url) == full_url
def test_get_db_url_with_port_in_host():
host = "localhost:1234"
dialect = "postgresql"
db = "mydb"
expected = "{}://{}/{}".format(dialect, host, db)
assert (
str(SQLTrackerStore._get_db_url(dialect="postgresql", host=host, db=db))
== expected
)
def test_get_db_url_with_correct_host():
expected = "postgresql://localhost:5005/mydb"
assert (
str(
SQLTrackerStore._get_db_url(
dialect="postgresql", host="localhost", port=5005, db="mydb"
)
)
== expected
)
| 29.646341 | 88 | 0.719663 | import pytest
from rasa.core.channels.channel import UserMessage
from rasa.core.domain import Domain
from rasa.core.events import SlotSet, ActionExecuted, Restarted
from rasa.core.tracker_store import (
TrackerStore,
InMemoryTrackerStore,
RedisTrackerStore,
SQLTrackerStore,
)
from rasa.utils.endpoints import EndpointConfig, read_endpoint_config
from tests.core.conftest import DEFAULT_ENDPOINTS_FILE
domain = Domain.load("data/test_domains/default.yml")
def test_get_or_create():
slot_key = "location"
slot_val = "Easter Island"
store = InMemoryTrackerStore(domain)
tracker = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID)
ev = SlotSet(slot_key, slot_val)
tracker.update(ev)
assert tracker.get_slot(slot_key) == slot_val
store.save(tracker)
again = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID)
assert again.get_slot(slot_key) == slot_val
def test_restart_after_retrieval_from_tracker_store(default_domain):
store = InMemoryTrackerStore(default_domain)
tr = store.get_or_create_tracker("myuser")
synth = [ActionExecuted("action_listen") for _ in range(4)]
for e in synth:
tr.update(e)
tr.update(Restarted())
latest_restart = tr.idx_after_latest_restart()
store.save(tr)
tr2 = store.retrieve("myuser")
latest_restart_after_loading = tr2.idx_after_latest_restart()
assert latest_restart == latest_restart_after_loading
def test_tracker_store_remembers_max_history(default_domain):
store = InMemoryTrackerStore(default_domain)
tr = store.get_or_create_tracker("myuser", max_event_history=42)
tr.update(Restarted())
store.save(tr)
tr2 = store.retrieve("myuser")
assert tr._max_event_history == tr2._max_event_history == 42
def test_tracker_store_endpoint_config_loading():
cfg = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
assert cfg == EndpointConfig.from_dict(
{
"type": "redis",
"url": "localhost",
"port": 6379,
"db": 0,
"password": "password",
"timeout": 30000,
}
)
def test_find_tracker_store(default_domain):
store = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
tracker_store = RedisTrackerStore(
domain=default_domain,
host="localhost",
port=6379,
db=0,
password="password",
record_exp=3000,
)
assert isinstance(
tracker_store, type(TrackerStore.find_tracker_store(default_domain, store))
)
class ExampleTrackerStore(RedisTrackerStore):
def __init__(self, domain, url, port, db, password, record_exp):
super(ExampleTrackerStore, self).__init__(
domain, host=url, port=port, db=db, password=password, record_exp=record_exp
)
def test_tracker_store_from_string(default_domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
assert isinstance(tracker_store, ExampleTrackerStore)
def test_tracker_store_from_invalid_module(default_domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
store_config.type = "a.module.which.cannot.be.found"
tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
assert isinstance(tracker_store, InMemoryTrackerStore)
def test_tracker_store_from_invalid_string(default_domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
store_config.type = "any string"
tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
assert isinstance(tracker_store, InMemoryTrackerStore)
@pytest.mark.parametrize(
"full_url",
[
"postgresql://localhost",
"postgresql://localhost:5432",
"postgresql://user:secret@localhost",
],
)
def test_get_db_url_with_fully_specified_url(full_url):
assert SQLTrackerStore._get_db_url(host=full_url) == full_url
def test_get_db_url_with_port_in_host():
host = "localhost:1234"
dialect = "postgresql"
db = "mydb"
expected = "{}://{}/{}".format(dialect, host, db)
assert (
str(SQLTrackerStore._get_db_url(dialect="postgresql", host=host, db=db))
== expected
)
def test_get_db_url_with_correct_host():
expected = "postgresql://localhost:5005/mydb"
assert (
str(
SQLTrackerStore._get_db_url(
dialect="postgresql", host="localhost", port=5005, db="mydb"
)
)
== expected
)
| true | true |
f71adf5f0868af6fbec61be245b281682e33dcf5 | 3,884 | py | Python | configs/top_down/resnext/coco/resnext101_coco_256x192.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | 5 | 2022-01-13T15:06:45.000Z | 2022-01-28T19:39:54.000Z | configs/top_down/resnext/coco/resnext101_coco_256x192.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | null | null | null | configs/top_down/resnext/coco/resnext101_coco_256x192.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | 1 | 2021-06-17T13:56:23.000Z | 2021-06-17T13:56:23.000Z | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='mmcls://resnext101',
backbone=dict(type='ResNeXt', depth=101),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| 27.742857 | 79 | 0.627703 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
model = dict(
type='TopDown',
pretrained='mmcls://resnext101',
backbone=dict(type='ResNeXt', depth=101),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| true | true |
f71adf69a610b566f88a21e94d9a8d804f8523db | 7,872 | py | Python | sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from os import PathLike
from pathlib import Path
from typing import Any, Dict, Union
from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, PARAMS_OVERRIDE_KEY, ArmConstants, LONG_URI_FORMAT, AssetTypes
from azure.ai.ml._restclient.v2022_05_01.models import (
ModelContainerData,
ModelVersionDetails,
ModelVersionData,
FlavorData,
)
from azure.ai.ml._schema import ModelSchema
from azure.ai.ml._utils._arm_id_utils import AMLNamedArmId, AMLVersionedArmId
from azure.ai.ml._utils.utils import load_yaml, snake_to_pascal
from azure.ai.ml.entities._assets import Artifact
from .artifact import ArtifactStorageInfo
from azure.ai.ml.entities._util import load_from_dict, get_md5_string
from azure.ai.ml._utils._asset_utils import get_ignore_file, get_object_hash
class Model(Artifact):
"""Model for training and scoring.
:param name: Name of the resource.
:type name: str
:param version: Version of the resource.
:type version: str
:param type: The storage format for this entity. Used for NCD. Possible values include:
"custom_model", "mlflow_model", "triton_model".
:type type: str
:param utc_time_created: Date and time when the model was created, in
UTC ISO 8601 format. (e.g. '2020-10-19 17:44:02.096572')
:type utc_time_created: str
:param flavors: The flavors in which the model can be interpreted.
(e.g. {sklearn: {sklearn_version: 0.23.2}, python_function: {loader_module: office.plrmodel, python_version: 3.6})
:type flavors: Dict[str, Any]
:param path: A remote uri or a local path pointing at a model.
Example: "azureml://subscriptions/my-sub-id/resourcegroups/my-rg/workspaces/myworkspace/datastores/mydatastore/paths/path_on_datastore/"
:type path: str
:param description: Description of the resource.
:type description: str
:param tags: Tag dictionary. Tags can be added, removed, and updated.
:type tags: dict[str, str]
:param properties: The asset property dictionary.
:type properties: dict[str, str]
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
"""
def __init__(
self,
*,
name: str = None,
version: str = None,
type: str = None,
path: Union[str, PathLike] = None,
utc_time_created: str = None,
flavors: Dict[str, Dict[str, Any]] = None,
description: str = None,
tags: Dict = None,
properties: Dict = None,
**kwargs,
):
self.job_name = kwargs.pop("job_name", None)
super().__init__(
name=name,
version=version,
path=path,
description=description,
tags=tags,
properties=properties,
**kwargs,
)
self.utc_time_created = utc_time_created
self.flavors = dict(flavors) if flavors else None
self._arm_type = ArmConstants.MODEL_VERSION_TYPE
self.type = type or AssetTypes.CUSTOM_MODEL
if self._is_anonymous and self.path:
_ignore_file = get_ignore_file(self.path)
_upload_hash = get_object_hash(self.path, _ignore_file)
self.name = get_md5_string(_upload_hash)
@classmethod
def load(
cls,
path: Union[PathLike, str],
params_override: list = None,
**kwargs,
) -> "Model":
"""Construct a model object from yaml file.
:param path: Path to a local file as the source.
:type path: str
:param params_override: Fields to overwrite on top of the yaml file. Format is [{"field1": "value1"}, {"field2": "value2"}]
:type params_override: list
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
:return: Constructed model object.
:rtype: Model
"""
yaml_dict = load_yaml(path)
return cls._load(data=yaml_dict, yaml_path=path, params_override=params_override, **kwargs)
# For lack of bidirectional map in Python, defining the mapping in two ways in one dictionary
@classmethod
def _load(
cls,
data: Dict = None,
yaml_path: Union[PathLike, str] = None,
params_override: list = None,
**kwargs,
) -> "Model":
params_override = params_override or []
data = data or {}
context = {
BASE_PATH_CONTEXT_KEY: Path(yaml_path).parent if yaml_path else Path("./"),
PARAMS_OVERRIDE_KEY: params_override,
}
return load_from_dict(ModelSchema, data, context, **kwargs)
def _to_dict(self) -> Dict:
return ModelSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
@classmethod
def _from_rest_object(cls, model_rest_object: ModelVersionData) -> "Model":
rest_model_version: ModelVersionDetails = model_rest_object.properties
arm_id = AMLVersionedArmId(arm_id=model_rest_object.id)
flavors = {key: flavor.data for key, flavor in rest_model_version.flavors.items()}
model = Model(
id=model_rest_object.id,
name=arm_id.asset_name,
version=arm_id.asset_version,
path=rest_model_version.model_uri,
description=rest_model_version.description,
tags=rest_model_version.tags,
flavors=flavors,
properties=rest_model_version.properties,
creation_context=model_rest_object.system_data,
type=rest_model_version.model_type,
job_name=rest_model_version.job_name,
)
return model
@classmethod
def _from_container_rest_object(cls, model_container_rest_object: ModelContainerData) -> "Model":
model = Model(
name=model_container_rest_object.name,
version="1",
id=model_container_rest_object.id,
creation_context=model_container_rest_object.system_data,
)
model.latest_version = model_container_rest_object.properties.latest_version
# Setting version to None since if version is not provided it is defaulted to "1".
# This should go away once container concept is finalized.
model.version = None
return model
def _to_rest_object(self) -> ModelVersionData:
model_version = ModelVersionDetails(
description=self.description,
tags=self.tags,
properties=self.properties,
flavors={key: FlavorData(data=dict(value)) for key, value in self.flavors.items()}
if self.flavors
else None, # flatten OrderedDict to dict
model_type=self.type,
model_uri=self.path,
is_anonymous=self._is_anonymous,
)
model_version_resource = ModelVersionData(properties=model_version)
return model_version_resource
def _update_path(self, asset_artifact: ArtifactStorageInfo) -> None:
aml_datastore_id = AMLNamedArmId(asset_artifact.datastore_arm_id)
self.path = LONG_URI_FORMAT.format(
aml_datastore_id.subscription_id,
aml_datastore_id.resource_group_name,
aml_datastore_id.workspace_name,
aml_datastore_id.asset_name,
asset_artifact.relative_path,
)
def _to_arm_resource_param(self, **kwargs):
properties = self._to_rest_object().properties
return {
self._arm_type: {
ArmConstants.NAME: self.name,
ArmConstants.VERSION: self.version,
ArmConstants.PROPERTIES_PARAMETER_NAME: self._serialize.body(properties, "ModelVersionDetails"),
}
}
| 38.778325 | 144 | 0.650788 |
from os import PathLike
from pathlib import Path
from typing import Any, Dict, Union
from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, PARAMS_OVERRIDE_KEY, ArmConstants, LONG_URI_FORMAT, AssetTypes
from azure.ai.ml._restclient.v2022_05_01.models import (
ModelContainerData,
ModelVersionDetails,
ModelVersionData,
FlavorData,
)
from azure.ai.ml._schema import ModelSchema
from azure.ai.ml._utils._arm_id_utils import AMLNamedArmId, AMLVersionedArmId
from azure.ai.ml._utils.utils import load_yaml, snake_to_pascal
from azure.ai.ml.entities._assets import Artifact
from .artifact import ArtifactStorageInfo
from azure.ai.ml.entities._util import load_from_dict, get_md5_string
from azure.ai.ml._utils._asset_utils import get_ignore_file, get_object_hash
class Model(Artifact):
def __init__(
self,
*,
name: str = None,
version: str = None,
type: str = None,
path: Union[str, PathLike] = None,
utc_time_created: str = None,
flavors: Dict[str, Dict[str, Any]] = None,
description: str = None,
tags: Dict = None,
properties: Dict = None,
**kwargs,
):
self.job_name = kwargs.pop("job_name", None)
super().__init__(
name=name,
version=version,
path=path,
description=description,
tags=tags,
properties=properties,
**kwargs,
)
self.utc_time_created = utc_time_created
self.flavors = dict(flavors) if flavors else None
self._arm_type = ArmConstants.MODEL_VERSION_TYPE
self.type = type or AssetTypes.CUSTOM_MODEL
if self._is_anonymous and self.path:
_ignore_file = get_ignore_file(self.path)
_upload_hash = get_object_hash(self.path, _ignore_file)
self.name = get_md5_string(_upload_hash)
@classmethod
def load(
cls,
path: Union[PathLike, str],
params_override: list = None,
**kwargs,
) -> "Model":
yaml_dict = load_yaml(path)
return cls._load(data=yaml_dict, yaml_path=path, params_override=params_override, **kwargs)
@classmethod
def _load(
cls,
data: Dict = None,
yaml_path: Union[PathLike, str] = None,
params_override: list = None,
**kwargs,
) -> "Model":
params_override = params_override or []
data = data or {}
context = {
BASE_PATH_CONTEXT_KEY: Path(yaml_path).parent if yaml_path else Path("./"),
PARAMS_OVERRIDE_KEY: params_override,
}
return load_from_dict(ModelSchema, data, context, **kwargs)
def _to_dict(self) -> Dict:
return ModelSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
@classmethod
def _from_rest_object(cls, model_rest_object: ModelVersionData) -> "Model":
rest_model_version: ModelVersionDetails = model_rest_object.properties
arm_id = AMLVersionedArmId(arm_id=model_rest_object.id)
flavors = {key: flavor.data for key, flavor in rest_model_version.flavors.items()}
model = Model(
id=model_rest_object.id,
name=arm_id.asset_name,
version=arm_id.asset_version,
path=rest_model_version.model_uri,
description=rest_model_version.description,
tags=rest_model_version.tags,
flavors=flavors,
properties=rest_model_version.properties,
creation_context=model_rest_object.system_data,
type=rest_model_version.model_type,
job_name=rest_model_version.job_name,
)
return model
@classmethod
def _from_container_rest_object(cls, model_container_rest_object: ModelContainerData) -> "Model":
model = Model(
name=model_container_rest_object.name,
version="1",
id=model_container_rest_object.id,
creation_context=model_container_rest_object.system_data,
)
model.latest_version = model_container_rest_object.properties.latest_version
model.version = None
return model
def _to_rest_object(self) -> ModelVersionData:
model_version = ModelVersionDetails(
description=self.description,
tags=self.tags,
properties=self.properties,
flavors={key: FlavorData(data=dict(value)) for key, value in self.flavors.items()}
if self.flavors
else None,
model_type=self.type,
model_uri=self.path,
is_anonymous=self._is_anonymous,
)
model_version_resource = ModelVersionData(properties=model_version)
return model_version_resource
def _update_path(self, asset_artifact: ArtifactStorageInfo) -> None:
aml_datastore_id = AMLNamedArmId(asset_artifact.datastore_arm_id)
self.path = LONG_URI_FORMAT.format(
aml_datastore_id.subscription_id,
aml_datastore_id.resource_group_name,
aml_datastore_id.workspace_name,
aml_datastore_id.asset_name,
asset_artifact.relative_path,
)
def _to_arm_resource_param(self, **kwargs):
properties = self._to_rest_object().properties
return {
self._arm_type: {
ArmConstants.NAME: self.name,
ArmConstants.VERSION: self.version,
ArmConstants.PROPERTIES_PARAMETER_NAME: self._serialize.body(properties, "ModelVersionDetails"),
}
}
| true | true |
f71adf72ecef2e4ad8d1dacf64125bbdfd663a2d | 3,023 | py | Python | pixelsort/argparams.py | jackylu97/pixelsort | 24e36518f21636c201ad8624c831e08462a25414 | [
"MIT"
] | 570 | 2015-03-01T16:16:42.000Z | 2022-03-28T23:12:11.000Z | pixelsort/argparams.py | ebanaut/pixelsort | c4a823c8363e27fb0aebd4f8738ee82dc636f6a8 | [
"MIT"
] | 20 | 2016-03-25T16:28:16.000Z | 2021-11-11T21:39:28.000Z | pixelsort/argparams.py | ebanaut/pixelsort | c4a823c8363e27fb0aebd4f8738ee82dc636f6a8 | [
"MIT"
] | 79 | 2015-03-16T20:14:22.000Z | 2022-02-01T17:05:02.000Z | import argparse
import logging
from pixelsort.interval import choices as interval_choices
from pixelsort.sorting import choices as sorting_choices
from pixelsort.constants import DEFAULTS
def parse_args():
parser = argparse.ArgumentParser(description="Pixel mangle an image.")
parser.add_argument("image", help="Input image file path.")
parser.add_argument(
"-o",
"--output",
help="Output image file path, DEFAULTS to the time created.")
parser.add_argument("-i", "--int_function",
choices=interval_choices.keys(),
default=DEFAULTS["interval_function"],
help="Function to determine sorting intervals")
parser.add_argument("-f", "--int_file",
help="Image used for defining intervals.")
parser.add_argument(
"-t",
"--threshold",
type=float,
default=DEFAULTS["lower_threshold"],
help="Pixels darker than this are not sorted, between 0 and 1")
parser.add_argument(
"-u",
"--upper_threshold",
type=float,
default=DEFAULTS["upper_threshold"],
help="Pixels brighter than this are not sorted, between 0 and 1")
parser.add_argument(
"-c",
"--clength",
type=int,
default=DEFAULTS["clength"],
help="Characteristic length of random intervals")
parser.add_argument(
"-a",
"--angle",
type=float,
default=DEFAULTS["angle"],
help="Rotate the image by an angle (in degrees) before sorting")
parser.add_argument(
"-r",
"--randomness",
type=float,
default=DEFAULTS["randomness"],
help="What percentage of intervals are NOT sorted")
parser.add_argument("-s", "--sorting_function",
choices=sorting_choices.keys(),
default=DEFAULTS["sorting_function"],
help="Function to sort pixels by.")
parser.add_argument(
"-m", "--mask", help="Image used for masking parts of the image")
parser.add_argument(
"-l",
"--log_level",
default="WARNING",
help="Print more or less info",
choices=[
"DEBUG",
"INFO",
"WARNING",
"ERROR",
"CRITICAL"])
_args = parser.parse_args()
logging.basicConfig(
format="%(name)s: %(levelname)s - %(message)s",
level=logging.getLevelName(
_args.log_level))
return {
"image_input_path": _args.image,
"image_output_path": _args.output,
"interval_function": _args.int_function,
"interval_file_path": _args.int_file,
"lower_threshold": _args.threshold,
"upper_threshold": _args.upper_threshold,
"clength": _args.clength,
"angle": _args.angle,
"randomness": _args.randomness,
"sorting_function": _args.sorting_function,
"mask_path": _args.mask
}
| 33.966292 | 74 | 0.585511 | import argparse
import logging
from pixelsort.interval import choices as interval_choices
from pixelsort.sorting import choices as sorting_choices
from pixelsort.constants import DEFAULTS
def parse_args():
parser = argparse.ArgumentParser(description="Pixel mangle an image.")
parser.add_argument("image", help="Input image file path.")
parser.add_argument(
"-o",
"--output",
help="Output image file path, DEFAULTS to the time created.")
parser.add_argument("-i", "--int_function",
choices=interval_choices.keys(),
default=DEFAULTS["interval_function"],
help="Function to determine sorting intervals")
parser.add_argument("-f", "--int_file",
help="Image used for defining intervals.")
parser.add_argument(
"-t",
"--threshold",
type=float,
default=DEFAULTS["lower_threshold"],
help="Pixels darker than this are not sorted, between 0 and 1")
parser.add_argument(
"-u",
"--upper_threshold",
type=float,
default=DEFAULTS["upper_threshold"],
help="Pixels brighter than this are not sorted, between 0 and 1")
parser.add_argument(
"-c",
"--clength",
type=int,
default=DEFAULTS["clength"],
help="Characteristic length of random intervals")
parser.add_argument(
"-a",
"--angle",
type=float,
default=DEFAULTS["angle"],
help="Rotate the image by an angle (in degrees) before sorting")
parser.add_argument(
"-r",
"--randomness",
type=float,
default=DEFAULTS["randomness"],
help="What percentage of intervals are NOT sorted")
parser.add_argument("-s", "--sorting_function",
choices=sorting_choices.keys(),
default=DEFAULTS["sorting_function"],
help="Function to sort pixels by.")
parser.add_argument(
"-m", "--mask", help="Image used for masking parts of the image")
parser.add_argument(
"-l",
"--log_level",
default="WARNING",
help="Print more or less info",
choices=[
"DEBUG",
"INFO",
"WARNING",
"ERROR",
"CRITICAL"])
_args = parser.parse_args()
logging.basicConfig(
format="%(name)s: %(levelname)s - %(message)s",
level=logging.getLevelName(
_args.log_level))
return {
"image_input_path": _args.image,
"image_output_path": _args.output,
"interval_function": _args.int_function,
"interval_file_path": _args.int_file,
"lower_threshold": _args.threshold,
"upper_threshold": _args.upper_threshold,
"clength": _args.clength,
"angle": _args.angle,
"randomness": _args.randomness,
"sorting_function": _args.sorting_function,
"mask_path": _args.mask
}
| true | true |
f71ae016370c3402b30ac0827d01b9242344e952 | 1,491 | py | Python | example/blog/admin2.py | montiniz/django-admin2 | eb3ba7e98a68686f80af80c5c3b8c9c10296fe7a | [
"BSD-3-Clause"
] | 1 | 2015-01-16T23:00:09.000Z | 2015-01-16T23:00:09.000Z | example/blog/admin2.py | montiniz/django-admin2 | eb3ba7e98a68686f80af80c5c3b8c9c10296fe7a | [
"BSD-3-Clause"
] | null | null | null | example/blog/admin2.py | montiniz/django-admin2 | eb3ba7e98a68686f80af80c5c3b8c9c10296fe7a | [
"BSD-3-Clause"
] | 1 | 2017-01-18T08:27:21.000Z | 2017-01-18T08:27:21.000Z | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy
import djadmin2
from djadmin2 import renderers
from djadmin2.actions import DeleteSelectedAction
# Import your custom models
from .actions import (CustomPublishAction, PublishAllItemsAction,
unpublish_items, unpublish_all_items)
from .models import Post, Comment
class CommentInline(djadmin2.Admin2TabularInline):
model = Comment
class PostAdmin(djadmin2.ModelAdmin2):
list_actions = [
DeleteSelectedAction, CustomPublishAction,
PublishAllItemsAction, unpublish_items,
unpublish_all_items,
]
inlines = [CommentInline]
search_fields = ('title', '^body')
list_display = ('title', 'body', 'published', "published_date",)
field_renderers = {
'title': renderers.title_renderer,
}
save_on_top = True
date_hierarchy = "published_date"
ordering = ["-published_date", "title",]
class CommentAdmin(djadmin2.ModelAdmin2):
search_fields = ('body', '=post__title')
list_filter = ['post', ]
actions_on_top = True
actions_on_bottom = True
actions_selection_counter = False
# Register the blog app with a verbose name
djadmin2.default.register_app_verbose_name(
'blog',
ugettext_lazy('My Blog')
)
# Register each model with the admin
djadmin2.default.register(Post, PostAdmin)
djadmin2.default.register(Comment, CommentAdmin)
| 27.611111 | 68 | 0.729041 |
from __future__ import division, absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy
import djadmin2
from djadmin2 import renderers
from djadmin2.actions import DeleteSelectedAction
from .actions import (CustomPublishAction, PublishAllItemsAction,
unpublish_items, unpublish_all_items)
from .models import Post, Comment
class CommentInline(djadmin2.Admin2TabularInline):
model = Comment
class PostAdmin(djadmin2.ModelAdmin2):
list_actions = [
DeleteSelectedAction, CustomPublishAction,
PublishAllItemsAction, unpublish_items,
unpublish_all_items,
]
inlines = [CommentInline]
search_fields = ('title', '^body')
list_display = ('title', 'body', 'published', "published_date",)
field_renderers = {
'title': renderers.title_renderer,
}
save_on_top = True
date_hierarchy = "published_date"
ordering = ["-published_date", "title",]
class CommentAdmin(djadmin2.ModelAdmin2):
search_fields = ('body', '=post__title')
list_filter = ['post', ]
actions_on_top = True
actions_on_bottom = True
actions_selection_counter = False
djadmin2.default.register_app_verbose_name(
'blog',
ugettext_lazy('My Blog')
)
djadmin2.default.register(Post, PostAdmin)
djadmin2.default.register(Comment, CommentAdmin)
| true | true |
f71ae15d38428c33a761ff30c5fe22e701f6415c | 28,827 | py | Python | electrum/lnsweep.py | roth-a/electrum | b6a5f6f2fc11b94bc863d2ccd43f166091badda9 | [
"MIT"
] | null | null | null | electrum/lnsweep.py | roth-a/electrum | b6a5f6f2fc11b94bc863d2ccd43f166091badda9 | [
"MIT"
] | 1 | 2020-01-31T17:11:07.000Z | 2020-01-31T17:11:07.000Z | electrum/lnsweep.py | roth-a/electrum | b6a5f6f2fc11b94bc863d2ccd43f166091badda9 | [
"MIT"
] | null | null | null | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
from typing import Optional, Dict, List, Tuple, TYPE_CHECKING, NamedTuple, Callable
from enum import Enum, auto
from .util import bfh, bh2u
from .bitcoin import redeem_script_to_address, dust_threshold
from . import ecc
from .lnutil import (make_commitment_output_to_remote_address, make_commitment_output_to_local_witness_script,
derive_privkey, derive_pubkey, derive_blinded_pubkey, derive_blinded_privkey,
make_htlc_tx_witness, make_htlc_tx_with_open_channel, UpdateAddHtlc,
LOCAL, REMOTE, make_htlc_output_witness_script, UnknownPaymentHash,
get_ordered_channel_configs, privkey_to_pubkey, get_per_commitment_secret_from_seed,
RevocationStore, extract_ctn_from_tx_and_chan, UnableToDeriveSecret, SENT, RECEIVED,
map_htlcs_to_ctx_output_idxs, Direction)
from .transaction import (Transaction, TxOutput, construct_witness, PartialTransaction, PartialTxInput,
PartialTxOutput, TxOutpoint)
from .simple_config import SimpleConfig
from .logging import get_logger
if TYPE_CHECKING:
from .lnchannel import Channel
_logger = get_logger(__name__)
class SweepInfo(NamedTuple):
    """Describes how/when one channel-close output can be swept."""
    name: str                                    # human-readable label (used for naming/logging)
    csv_delay: int                               # relative (CSV) delay that must pass first; 0 if none
    cltv_expiry: int                             # absolute locktime the sweep tx must carry; 0 if none
    gen_tx: Callable[[], Optional[Transaction]]  # lazily builds the sweep tx; may return None (e.g. dust)
def create_sweeptxs_for_watchtower(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
                                   sweep_address: str) -> List[Transaction]:
    """Presign sweeping transactions using the just received revoked pcs.
    These will only be utilised if the remote breaches.
    Sweep 'to_local', and all the HTLCs (two cases: directly from ctx, or from HTLC tx).
    """
    # prep: from the revoked per-commitment secret, derive the revocation
    # private key that lets us claim the remote's outputs immediately
    # (no CSV wait on the revocation path).
    pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
    this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
    other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
                                                      per_commitment_secret)
    to_self_delay = other_conf.to_self_delay
    this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
    txs = []
    # to_local: reconstruct the witness script so we can locate the output in ctx
    revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        revocation_pubkey, to_self_delay, this_delayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', witness_script)
    output_idxs = ctx.get_output_idxs_from_address(to_local_address)
    if output_idxs:
        output_idx = output_idxs.pop()
        sweep_tx = create_sweeptx_ctx_to_local(
            sweep_address=sweep_address,
            ctx=ctx,
            output_idx=output_idx,
            witness_script=witness_script,
            privkey=other_revocation_privkey,
            is_revocation=True,
            config=chan.lnworker.config)
        if sweep_tx:  # None if the output would be dust after fees
            txs.append(sweep_tx)
    # HTLCs: presign the second-stage sweep that spends the remote's
    # HTLC-tx output (in case they broadcast the HTLC tx after breaching).
    def create_sweeptx_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
                                ctx_output_idx: int) -> Optional[Transaction]:
        htlc_tx_witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
                                                                         pcp=pcp,
                                                                         subject=REMOTE,
                                                                         htlc_direction=htlc_direction,
                                                                         commit=ctx,
                                                                         htlc=htlc,
                                                                         ctx_output_idx=ctx_output_idx)
        # to_self_delay=0: the revocation path is not CSV-encumbered
        return create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
            to_self_delay=0,
            htlc_tx=htlc_tx,
            htlctx_witness_script=htlc_tx_witness_script,
            sweep_address=sweep_address,
            privkey=other_revocation_privkey,
            is_revocation=True,
            config=chan.lnworker.config)
    ctn = extract_ctn_from_tx_and_chan(ctx, chan)
    htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
                                                              ctx=ctx,
                                                              pcp=pcp,
                                                              subject=REMOTE,
                                                              ctn=ctn)
    for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
        secondstage_sweep_tx = create_sweeptx_for_htlc(htlc=htlc,
                                                       htlc_direction=direction,
                                                       ctx_output_idx=ctx_output_idx)
        if secondstage_sweep_tx:
            txs.append(secondstage_sweep_tx)
    return txs
def create_sweeptx_for_their_revoked_ctx(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
                                         sweep_address: str) -> Optional[Callable[[], Optional[Transaction]]]:
    """Return a closure that builds a penalty tx sweeping the 'to_local'
    output of the remote's *revoked* commitment tx `ctx`.

    Returns None if `ctx` has no 'to_local' output we can recognise.
    The leaked per-commitment secret yields the revocation private key,
    so the output can be claimed immediately (no CSV wait).
    """
    # derive the revocation key from the leaked per-commitment secret
    pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
    this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
    other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
                                                      per_commitment_secret)
    to_self_delay = other_conf.to_self_delay
    this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
    # reconstruct the 'to_local' witness script / address of the revoked ctx
    revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        revocation_pubkey, to_self_delay, this_delayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', witness_script)
    output_idxs = ctx.get_output_idxs_from_address(to_local_address)
    if not output_idxs:
        return None
    output_idx = output_idxs.pop()
    # lazily built: fee estimation happens only when the closure is invoked
    return lambda: create_sweeptx_ctx_to_local(
        sweep_address=sweep_address,
        ctx=ctx,
        output_idx=output_idx,
        witness_script=witness_script,
        privkey=other_revocation_privkey,
        is_revocation=True,
        config=chan.lnworker.config)
def create_sweeptx_for_their_revoked_htlc(chan: 'Channel', ctx: Transaction, htlc_tx: Transaction,
                                          sweep_address: str) -> Optional[SweepInfo]:
    """If `ctx` is a *revoked* commitment tx of the remote and `htlc_tx`
    spends one of its HTLC outputs, return a SweepInfo whose gen_tx claims
    the HTLC-tx output via the revocation path. Returns None otherwise.
    """
    x = analyze_ctx(chan, ctx)
    if not x:
        return
    ctn, their_pcp, is_revocation, per_commitment_secret = x
    if not is_revocation:
        return
    # prep: derive the revocation key from the leaked per-commitment secret
    pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
    this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
    other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
                                                      per_commitment_secret)
    to_self_delay = other_conf.to_self_delay
    this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
    # the HTLC-tx output uses the same witness script shape as 'to_local'
    revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        revocation_pubkey, to_self_delay, this_delayed_pubkey))
    htlc_address = redeem_script_to_address('p2wsh', witness_script)
    # check that htlc_tx is a htlc: its output 0 must pay to the expected script
    if htlc_tx.outputs()[0].address != htlc_address:
        return
    gen_tx = lambda: create_sweeptx_ctx_to_local(
        sweep_address=sweep_address,
        ctx=htlc_tx,
        output_idx=0,
        witness_script=witness_script,
        privkey=other_revocation_privkey,
        is_revocation=True,
        config=chan.lnworker.config)
    # csv_delay/cltv_expiry are 0: the revocation path has no timelocks
    return SweepInfo(name='redeem_htlc2',
                     csv_delay=0,
                     cltv_expiry=0,
                     gen_tx=gen_tx)
def create_sweeptxs_for_our_ctx(*, chan: 'Channel', ctx: Transaction,
                                sweep_address: str) -> Optional[Dict[str, SweepInfo]]:
    """Handle the case where we force close unilaterally with our latest ctx.
    Construct sweep txns for 'to_local', and for all HTLCs (2 txns each).
    'to_local' can be swept even if this is a breach (by us),
    but HTLCs cannot (old HTLCs are no longer stored).

    Returns a dict keyed by prevout string ('txid:idx'), or None if `ctx`
    does not look like one of our commitment transactions.
    """
    ctn = extract_ctn_from_tx_and_chan(ctx, chan)
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    # re-derive the per-commitment point for this ctn from our seed
    our_per_commitment_secret = get_per_commitment_secret_from_seed(
        our_conf.per_commitment_secret_seed, RevocationStore.START_INDEX - ctn)
    our_pcp = ecc.ECPrivkey(our_per_commitment_secret).get_public_key_bytes(compressed=True)
    our_delayed_bp_privkey = ecc.ECPrivkey(our_conf.delayed_basepoint.privkey)
    our_localdelayed_privkey = derive_privkey(our_delayed_bp_privkey.secret_scalar, our_pcp)
    our_localdelayed_privkey = ecc.ECPrivkey.from_secret_scalar(our_localdelayed_privkey)
    their_revocation_pubkey = derive_blinded_pubkey(their_conf.revocation_basepoint.pubkey, our_pcp)
    to_self_delay = their_conf.to_self_delay
    our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'),
                                      per_commitment_point=our_pcp).to_bytes(32, 'big')
    our_localdelayed_pubkey = our_localdelayed_privkey.get_public_key_bytes(compressed=True)
    to_local_witness_script = bh2u(make_commitment_output_to_local_witness_script(
        their_revocation_pubkey, to_self_delay, our_localdelayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', to_local_witness_script)
    their_payment_pubkey = derive_pubkey(their_conf.payment_basepoint.pubkey, our_pcp)
    to_remote_address = make_commitment_output_to_remote_address(their_payment_pubkey)
    # sanity-test that ctx really is our commitment tx: it must pay to at
    # least one of the two addresses we just reconstructed
    _logger.debug(f'testing our ctx: {to_local_address} {to_remote_address}')
    if not ctx.get_output_idxs_from_address(to_local_address) \
       and not ctx.get_output_idxs_from_address(to_remote_address):
        return
    # we have to_local, to_remote.
    # other outputs are htlcs
    # if they are spent, we need to generate the script
    # so, second-stage htlc sweep should not be returned here
    if ctn < chan.get_oldest_unrevoked_ctn(LOCAL):
        # we broadcast an old (revoked) ctx of our own; we can still sweep
        # 'to_local' but the old HTLC data is gone, hence the empty dict
        _logger.info("we breached.")
        return {}
    txs = {}  # type: Dict[str, SweepInfo]
    # to_local: spendable by us only after to_self_delay (CSV)
    output_idxs = ctx.get_output_idxs_from_address(to_local_address)
    if output_idxs:
        output_idx = output_idxs.pop()
        sweep_tx = lambda: create_sweeptx_ctx_to_local(
            sweep_address=sweep_address,
            ctx=ctx,
            output_idx=output_idx,
            witness_script=to_local_witness_script,
            privkey=our_localdelayed_privkey.get_secret_bytes(),
            is_revocation=False,
            to_self_delay=to_self_delay,
            config=chan.lnworker.config)
        prevout = ctx.txid() + ':%d'%output_idx
        txs[prevout] = SweepInfo(name='our_ctx_to_local',
                                 csv_delay=to_self_delay,
                                 cltv_expiry=0,
                                 gen_tx=sweep_tx)
    # HTLCs: each needs a two-stage sweep (HTLC tx, then a sweep of its output)
    def create_txns_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
                             ctx_output_idx: int, htlc_relative_idx: int):
        if htlc_direction == RECEIVED:
            # received HTLC: we can only claim it if we know the preimage
            try:
                preimage = chan.lnworker.get_preimage(htlc.payment_hash)
            except UnknownPaymentHash as e:
                _logger.info(f'trying to sweep htlc from our latest ctx but getting {repr(e)}')
                return
        else:
            preimage = None
        htlctx_witness_script, htlc_tx = create_htlctx_that_spends_from_our_ctx(
            chan=chan,
            our_pcp=our_pcp,
            ctx=ctx,
            htlc=htlc,
            local_htlc_privkey=our_htlc_privkey,
            preimage=preimage,
            htlc_direction=htlc_direction,
            ctx_output_idx=ctx_output_idx,
            htlc_relative_idx=htlc_relative_idx)
        sweep_tx = lambda: create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
            to_self_delay=to_self_delay,
            htlc_tx=htlc_tx,
            htlctx_witness_script=htlctx_witness_script,
            sweep_address=sweep_address,
            privkey=our_localdelayed_privkey.get_secret_bytes(),
            is_revocation=False,
            config=chan.lnworker.config)
        # side effect: both stages are recorded into the enclosing `txs` dict
        txs[htlc_tx.inputs()[0].prevout.to_str()] = SweepInfo(name='first-stage-htlc',
                                                              csv_delay=0,
                                                              cltv_expiry=htlc_tx.locktime,
                                                              gen_tx=lambda: htlc_tx)
        txs[htlc_tx.txid() + ':0'] = SweepInfo(name='second-stage-htlc',
                                               csv_delay=to_self_delay,
                                               cltv_expiry=0,
                                               gen_tx=sweep_tx)
    # offered HTLCs, in our ctx --> "timeout"
    # received HTLCs, in our ctx --> "success"
    htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
                                                              ctx=ctx,
                                                              pcp=our_pcp,
                                                              subject=LOCAL,
                                                              ctn=ctn)
    for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
        create_txns_for_htlc(htlc=htlc,
                             htlc_direction=direction,
                             ctx_output_idx=ctx_output_idx,
                             htlc_relative_idx=htlc_relative_idx)
    return txs
def analyze_ctx(chan: 'Channel', ctx: Transaction):
    """Classify a commitment transaction presumed to belong to the remote.

    Returns a 4-tuple (ctn, their_pcp, is_revocation, per_commitment_secret),
    or None if the commitment number cannot be matched to anything we know.
    `per_commitment_secret` is only non-None for revoked (breach) ctx-es.
    """
    # note: the remote sometimes has two valid non-revoked commitment transactions,
    # either of which could be broadcast
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    ctn = extract_ctn_from_tx_and_chan(ctx, chan)
    per_commitment_secret = None
    oldest_unrevoked_remote_ctn = chan.get_oldest_unrevoked_ctn(REMOTE)
    if ctn == oldest_unrevoked_remote_ctn:
        # their current (latest revocable) commitment
        their_pcp = their_conf.current_per_commitment_point
        is_revocation = False
    elif ctn == oldest_unrevoked_remote_ctn + 1:
        # their next commitment (also valid until revoked)
        their_pcp = their_conf.next_per_commitment_point
        is_revocation = False
    elif ctn < oldest_unrevoked_remote_ctn:  # breach
        try:
            per_commitment_secret = chan.revocation_store.retrieve_secret(RevocationStore.START_INDEX - ctn)
        except UnableToDeriveSecret:
            return
        their_pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
        is_revocation = True
        #_logger.info(f'tx for revoked: {list(txs.keys())}')
    elif ctn in chan.data_loss_protect_remote_pcp:
        # future ctx whose pcp we learned via option_data_loss_protect
        their_pcp = chan.data_loss_protect_remote_pcp[ctn]
        is_revocation = False
    else:
        return
    return ctn, their_pcp, is_revocation, per_commitment_secret
def create_sweeptxs_for_their_ctx(*, chan: 'Channel', ctx: Transaction,
                                  sweep_address: str) -> Optional[Dict[str,SweepInfo]]:
    """Handle the case when the remote force-closes with their ctx.
    Sweep outputs that do not have a CSV delay ('to_remote' and first-stage HTLCs).
    Outputs with CSV delay ('to_local' and second-stage HTLCs) are redeemed by LNWatcher.

    Returns a dict keyed by prevout string ('txid:idx'), or None if `ctx`
    does not look like one of the remote's commitment transactions.
    """
    txs = {}  # type: Dict[str, SweepInfo]
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    x = analyze_ctx(chan, ctx)
    if not x:
        return
    ctn, their_pcp, is_revocation, per_commitment_secret = x
    # reconstruct to_local and to_remote addresses for this ctx
    our_revocation_pubkey = derive_blinded_pubkey(our_conf.revocation_basepoint.pubkey, their_pcp)
    their_delayed_pubkey = derive_pubkey(their_conf.delayed_basepoint.pubkey, their_pcp)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        our_revocation_pubkey, our_conf.to_self_delay, their_delayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', witness_script)
    our_payment_pubkey = derive_pubkey(our_conf.payment_basepoint.pubkey, their_pcp)
    to_remote_address = make_commitment_output_to_remote_address(our_payment_pubkey)
    # sanity-test that ctx really is their commitment tx: it must pay to at
    # least one of the two addresses we just reconstructed
    _logger.debug(f'testing their ctx: {to_local_address} {to_remote_address}')
    if not ctx.get_output_idxs_from_address(to_local_address) \
       and not ctx.get_output_idxs_from_address(to_remote_address):
        return
    if is_revocation:
        # breach: the revocation key lets us claim 'to_local' immediately
        our_revocation_privkey = derive_blinded_privkey(our_conf.revocation_basepoint.privkey, per_commitment_secret)
        gen_tx = create_sweeptx_for_their_revoked_ctx(chan, ctx, per_commitment_secret, chan.sweep_address)
        if gen_tx:
            tx = gen_tx()
            txs[tx.inputs()[0].prevout.to_str()] = SweepInfo(name='to_local_for_revoked_ctx',
                                                             csv_delay=0,
                                                             cltv_expiry=0,
                                                             gen_tx=gen_tx)
    # prep: derive the HTLC and payment keys tweaked with their pcp
    our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'), per_commitment_point=their_pcp)
    our_htlc_privkey = ecc.ECPrivkey.from_secret_scalar(our_htlc_privkey)
    their_htlc_pubkey = derive_pubkey(their_conf.htlc_basepoint.pubkey, their_pcp)
    our_payment_bp_privkey = ecc.ECPrivkey(our_conf.payment_basepoint.privkey)
    our_payment_privkey = derive_privkey(our_payment_bp_privkey.secret_scalar, their_pcp)
    our_payment_privkey = ecc.ECPrivkey.from_secret_scalar(our_payment_privkey)
    assert our_payment_pubkey == our_payment_privkey.get_public_key_bytes(compressed=True)
    # to_local is handled by lnwatcher
    # to_remote: directly spendable by us, no delay
    output_idxs = ctx.get_output_idxs_from_address(to_remote_address)
    if output_idxs:
        output_idx = output_idxs.pop()
        prevout = ctx.txid() + ':%d'%output_idx
        sweep_tx = lambda: create_sweeptx_their_ctx_to_remote(
            sweep_address=sweep_address,
            ctx=ctx,
            output_idx=output_idx,
            our_payment_privkey=our_payment_privkey,
            config=chan.lnworker.config)
        txs[prevout] = SweepInfo(name='their_ctx_to_remote',
                                 csv_delay=0,
                                 cltv_expiry=0,
                                 gen_tx=sweep_tx)
    # HTLCs: sweep them directly from ctx (first-stage; no CSV delay for us)
    def create_sweeptx_for_htlc(htlc: 'UpdateAddHtlc', is_received_htlc: bool,
                                ctx_output_idx: int) -> None:
        if not is_received_htlc and not is_revocation:
            # offered-to-us HTLC: we need the preimage to claim it
            try:
                preimage = chan.lnworker.get_preimage(htlc.payment_hash)
            except UnknownPaymentHash as e:
                _logger.info(f'trying to sweep htlc from their latest ctx but getting {repr(e)}')
                return
        else:
            preimage = None
        htlc_output_witness_script = make_htlc_output_witness_script(
            is_received_htlc=is_received_htlc,
            remote_revocation_pubkey=our_revocation_pubkey,
            remote_htlc_pubkey=our_htlc_privkey.get_public_key_bytes(compressed=True),
            local_htlc_pubkey=their_htlc_pubkey,
            payment_hash=htlc.payment_hash,
            cltv_expiry=htlc.cltv_expiry)
        # timeout path of a received HTLC needs the absolute locktime;
        # success and revocation paths do not
        cltv_expiry = htlc.cltv_expiry if is_received_htlc and not is_revocation else 0
        prevout = ctx.txid() + ':%d'%ctx_output_idx
        # note: our_revocation_privkey is only bound when is_revocation is True
        sweep_tx = lambda: create_sweeptx_their_ctx_htlc(
            ctx=ctx,
            witness_script=htlc_output_witness_script,
            sweep_address=sweep_address,
            preimage=preimage,
            output_idx=ctx_output_idx,
            privkey=our_revocation_privkey if is_revocation else our_htlc_privkey.get_secret_bytes(),
            is_revocation=is_revocation,
            cltv_expiry=cltv_expiry,
            config=chan.lnworker.config)
        txs[prevout] = SweepInfo(name=f'their_ctx_htlc_{ctx_output_idx}',
                                 csv_delay=0,
                                 cltv_expiry=cltv_expiry,
                                 gen_tx=sweep_tx)
    # received HTLCs, in their ctx --> "timeout"
    # offered HTLCs, in their ctx --> "success"
    htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
                                                              ctx=ctx,
                                                              pcp=their_pcp,
                                                              subject=REMOTE,
                                                              ctn=ctn)
    for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
        create_sweeptx_for_htlc(htlc=htlc,
                                is_received_htlc=direction == RECEIVED,
                                ctx_output_idx=ctx_output_idx)
    return txs
def create_htlctx_that_spends_from_our_ctx(chan: 'Channel', our_pcp: bytes,
                                           ctx: Transaction, htlc: 'UpdateAddHtlc',
                                           local_htlc_privkey: bytes, preimage: Optional[bytes],
                                           htlc_direction: Direction, htlc_relative_idx: int,
                                           ctx_output_idx: int) -> Tuple[bytes, Transaction]:
    """Build a fully-signed HTLC tx spending an HTLC output of *our* ctx.

    Combines the counterparty's stored signature with our fresh one
    (plus the preimage, for received HTLCs) into the input witness.
    Returns (witness_script_of_htlctx_output, signed_htlc_tx).
    """
    assert (htlc_direction == RECEIVED) == bool(preimage), 'preimage is required iff htlc is received'
    preimage = preimage or b''
    witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
                                                            pcp=our_pcp,
                                                            subject=LOCAL,
                                                            htlc_direction=htlc_direction,
                                                            commit=ctx,
                                                            htlc=htlc,
                                                            ctx_output_idx=ctx_output_idx,
                                                            name=f'our_ctx_{ctx_output_idx}_htlc_tx_{bh2u(htlc.payment_hash)}')
    # the remote's signature was received earlier and stored on the channel
    remote_htlc_sig = chan.get_remote_htlc_sig_for_htlc(htlc_relative_idx=htlc_relative_idx)
    local_htlc_sig = bfh(htlc_tx.sign_txin(0, local_htlc_privkey))
    txin = htlc_tx.inputs()[0]
    witness_program = bfh(Transaction.get_preimage_script(txin))
    txin.witness = make_htlc_tx_witness(remote_htlc_sig, local_htlc_sig, preimage, witness_program)
    return witness_script, htlc_tx
def create_sweeptx_their_ctx_htlc(ctx: Transaction, witness_script: bytes, sweep_address: str,
                                  preimage: Optional[bytes], output_idx: int,
                                  privkey: bytes, is_revocation: bool,
                                  cltv_expiry: int, config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep an HTLC output directly from the remote's commitment tx.

    privkey is either our htlc privkey (normal case) or the revocation
    privkey (breach); the witness differs accordingly. Returns None if the
    remaining value after fees would be dust.
    """
    assert type(cltv_expiry) is int
    preimage = preimage or b''  # preimage is required iff (not is_revocation and htlc is offered)
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.witness_script = witness_script
    txin.script_sig = b''
    sweep_inputs = [txin]
    tx_size_bytes = 200  # TODO (depends on offered/received and is_revocation)
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    # locktime carries the HTLC's CLTV expiry for the timeout path (0 otherwise)
    tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2, locktime=cltv_expiry)
    sig = bfh(tx.sign_txin(0, privkey))
    if not is_revocation:
        witness = construct_witness([sig, preimage, witness_script])
    else:
        revocation_pubkey = privkey_to_pubkey(privkey)
        witness = construct_witness([sig, revocation_pubkey, witness_script])
    tx.inputs()[0].witness = bfh(witness)
    assert tx.is_complete()
    return tx
def create_sweeptx_their_ctx_to_remote(sweep_address: str, ctx: Transaction, output_idx: int,
                                       our_payment_privkey: ecc.ECPrivkey,
                                       config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep the 'to_remote' (p2wpkh) output of the remote's commitment tx
    into our wallet. Returns None if the value left after fees would be dust.
    """
    our_payment_pubkey = our_payment_privkey.get_public_key_hex(compressed=True)
    input_value = ctx.outputs()[output_idx].value
    # fee for an approx 110-vbyte p2wpkh -> p2wpkh transaction
    fee = config.estimate_fee(110, allow_fallback_to_static_rates=True)
    output_value = input_value - fee
    if output_value <= dust_threshold():
        return None
    sweep_input = PartialTxInput(prevout=TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx))
    sweep_input._trusted_value_sats = input_value
    sweep_input.script_type = 'p2wpkh'
    sweep_input.pubkeys = [bfh(our_payment_pubkey)]
    sweep_input.num_sig = 1
    sweep_output = PartialTxOutput.from_address_and_value(sweep_address, output_value)
    sweep_tx = PartialTransaction.from_io([sweep_input], [sweep_output])
    sweep_tx.set_rbf(True)
    sweep_tx.sign({our_payment_pubkey: (our_payment_privkey.get_secret_bytes(), True)})
    if not sweep_tx.is_complete():
        raise Exception('channel close sweep tx is not complete')
    return sweep_tx
def create_sweeptx_ctx_to_local(*, sweep_address: str, ctx: Transaction, output_idx: int, witness_script: str,
                                privkey: bytes, is_revocation: bool, config: SimpleConfig,
                                to_self_delay: int=None) -> Optional[PartialTransaction]:
    """Create a txn that sweeps the 'to_local' output of a commitment
    transaction into our wallet.

    privkey: either revocation_privkey or localdelayed_privkey
    is_revocation: tells us which ^

    Returns None if the value left after fees would be dust.
    """
    input_value = ctx.outputs()[output_idx].value
    sweep_input = PartialTxInput(prevout=TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx))
    sweep_input._trusted_value_sats = input_value
    sweep_input.script_sig = b''
    sweep_input.witness_script = bfh(witness_script)
    if not is_revocation:
        # the localdelayed path must wait out the CSV delay
        assert isinstance(to_self_delay, int)
        sweep_input.nsequence = to_self_delay
    # fee for an approx 121-vbyte to_local -> p2wpkh transaction
    fee = config.estimate_fee(121, allow_fallback_to_static_rates=True)
    output_value = input_value - fee
    if output_value <= dust_threshold():
        return None
    sweep_output = PartialTxOutput.from_address_and_value(sweep_address, output_value)
    sweep_tx = PartialTransaction.from_io([sweep_input], [sweep_output], version=2)
    sig = sweep_tx.sign_txin(0, privkey)
    sweep_tx.inputs()[0].witness = bfh(construct_witness([sig, int(is_revocation), witness_script]))
    return sweep_tx
def create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(*,
        htlc_tx: Transaction, htlctx_witness_script: bytes, sweep_address: str,
        privkey: bytes, is_revocation: bool, to_self_delay: int,
        config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep output 0 of a broadcast HTLC tx (second stage) into our wallet.
    Returns None if the value left after fees would be dust.
    """
    input_value = htlc_tx.outputs()[0].value
    sweep_input = PartialTxInput(prevout=TxOutpoint(txid=bfh(htlc_tx.txid()), out_idx=0))
    sweep_input._trusted_value_sats = input_value
    sweep_input.script_sig = b''
    sweep_input.witness_script = htlctx_witness_script
    if not is_revocation:
        # the non-revocation path must wait out the CSV delay
        assert isinstance(to_self_delay, int)
        sweep_input.nsequence = to_self_delay
    fee = config.estimate_fee(200, allow_fallback_to_static_rates=True)  # TODO: real size estimate
    output_value = input_value - fee
    if output_value <= dust_threshold():
        return None
    sweep_output = PartialTxOutput.from_address_and_value(sweep_address, output_value)
    tx = PartialTransaction.from_io([sweep_input], [sweep_output], version=2)
    sig = bfh(tx.sign_txin(0, privkey))
    tx.inputs()[0].witness = bfh(construct_witness([sig, int(is_revocation), htlctx_witness_script]))
    assert tx.is_complete()
    return tx
| 52.034296 | 132 | 0.648073 |
from typing import Optional, Dict, List, Tuple, TYPE_CHECKING, NamedTuple, Callable
from enum import Enum, auto
from .util import bfh, bh2u
from .bitcoin import redeem_script_to_address, dust_threshold
from . import ecc
from .lnutil import (make_commitment_output_to_remote_address, make_commitment_output_to_local_witness_script,
derive_privkey, derive_pubkey, derive_blinded_pubkey, derive_blinded_privkey,
make_htlc_tx_witness, make_htlc_tx_with_open_channel, UpdateAddHtlc,
LOCAL, REMOTE, make_htlc_output_witness_script, UnknownPaymentHash,
get_ordered_channel_configs, privkey_to_pubkey, get_per_commitment_secret_from_seed,
RevocationStore, extract_ctn_from_tx_and_chan, UnableToDeriveSecret, SENT, RECEIVED,
map_htlcs_to_ctx_output_idxs, Direction)
from .transaction import (Transaction, TxOutput, construct_witness, PartialTransaction, PartialTxInput,
PartialTxOutput, TxOutpoint)
from .simple_config import SimpleConfig
from .logging import get_logger
if TYPE_CHECKING:
from .lnchannel import Channel
_logger = get_logger(__name__)
class SweepInfo(NamedTuple):
name: str
csv_delay: int
cltv_expiry: int
gen_tx: Callable[[], Optional[Transaction]]
def create_sweeptxs_for_watchtower(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
sweep_address: str) -> List[Transaction]:
pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
per_commitment_secret)
to_self_delay = other_conf.to_self_delay
this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
txs = []
revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
revocation_pubkey, to_self_delay, this_delayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', witness_script)
output_idxs = ctx.get_output_idxs_from_address(to_local_address)
if output_idxs:
output_idx = output_idxs.pop()
sweep_tx = create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
witness_script=witness_script,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
if sweep_tx:
txs.append(sweep_tx)
def create_sweeptx_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
ctx_output_idx: int) -> Optional[Transaction]:
htlc_tx_witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
pcp=pcp,
subject=REMOTE,
htlc_direction=htlc_direction,
commit=ctx,
htlc=htlc,
ctx_output_idx=ctx_output_idx)
return create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
to_self_delay=0,
htlc_tx=htlc_tx,
htlctx_witness_script=htlc_tx_witness_script,
sweep_address=sweep_address,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
ctn = extract_ctn_from_tx_and_chan(ctx, chan)
htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
ctx=ctx,
pcp=pcp,
subject=REMOTE,
ctn=ctn)
for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
secondstage_sweep_tx = create_sweeptx_for_htlc(htlc=htlc,
htlc_direction=direction,
ctx_output_idx=ctx_output_idx)
if secondstage_sweep_tx:
txs.append(secondstage_sweep_tx)
return txs
def create_sweeptx_for_their_revoked_ctx(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
sweep_address: str) -> Optional[Callable[[], Optional[Transaction]]]:
pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
per_commitment_secret)
to_self_delay = other_conf.to_self_delay
this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
txs = []
revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
revocation_pubkey, to_self_delay, this_delayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', witness_script)
output_idxs = ctx.get_output_idxs_from_address(to_local_address)
if output_idxs:
output_idx = output_idxs.pop()
sweep_tx = lambda: create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
witness_script=witness_script,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
return sweep_tx
return None
def create_sweeptx_for_their_revoked_htlc(chan: 'Channel', ctx: Transaction, htlc_tx: Transaction,
sweep_address: str) -> Optional[SweepInfo]:
x = analyze_ctx(chan, ctx)
if not x:
return
ctn, their_pcp, is_revocation, per_commitment_secret = x
if not is_revocation:
return
pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
per_commitment_secret)
to_self_delay = other_conf.to_self_delay
this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
revocation_pubkey, to_self_delay, this_delayed_pubkey))
htlc_address = redeem_script_to_address('p2wsh', witness_script)
if htlc_tx.outputs()[0].address != htlc_address:
return
gen_tx = lambda: create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=htlc_tx,
output_idx=0,
witness_script=witness_script,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
return SweepInfo(name='redeem_htlc2',
csv_delay=0,
cltv_expiry=0,
gen_tx=gen_tx)
def create_sweeptxs_for_our_ctx(*, chan: 'Channel', ctx: Transaction,
sweep_address: str) -> Optional[Dict[str, SweepInfo]]:
ctn = extract_ctn_from_tx_and_chan(ctx, chan)
our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
our_per_commitment_secret = get_per_commitment_secret_from_seed(
our_conf.per_commitment_secret_seed, RevocationStore.START_INDEX - ctn)
our_pcp = ecc.ECPrivkey(our_per_commitment_secret).get_public_key_bytes(compressed=True)
our_delayed_bp_privkey = ecc.ECPrivkey(our_conf.delayed_basepoint.privkey)
our_localdelayed_privkey = derive_privkey(our_delayed_bp_privkey.secret_scalar, our_pcp)
our_localdelayed_privkey = ecc.ECPrivkey.from_secret_scalar(our_localdelayed_privkey)
their_revocation_pubkey = derive_blinded_pubkey(their_conf.revocation_basepoint.pubkey, our_pcp)
to_self_delay = their_conf.to_self_delay
our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'),
per_commitment_point=our_pcp).to_bytes(32, 'big')
our_localdelayed_pubkey = our_localdelayed_privkey.get_public_key_bytes(compressed=True)
to_local_witness_script = bh2u(make_commitment_output_to_local_witness_script(
their_revocation_pubkey, to_self_delay, our_localdelayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', to_local_witness_script)
their_payment_pubkey = derive_pubkey(their_conf.payment_basepoint.pubkey, our_pcp)
to_remote_address = make_commitment_output_to_remote_address(their_payment_pubkey)
_logger.debug(f'testing our ctx: {to_local_address} {to_remote_address}')
if not ctx.get_output_idxs_from_address(to_local_address) \
and not ctx.get_output_idxs_from_address(to_remote_address):
return
if ctn < chan.get_oldest_unrevoked_ctn(LOCAL):
_logger.info("we breached.")
return {}
txs = {}
output_idxs = ctx.get_output_idxs_from_address(to_local_address)
if output_idxs:
output_idx = output_idxs.pop()
sweep_tx = lambda: create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
witness_script=to_local_witness_script,
privkey=our_localdelayed_privkey.get_secret_bytes(),
is_revocation=False,
to_self_delay=to_self_delay,
config=chan.lnworker.config)
prevout = ctx.txid() + ':%d'%output_idx
txs[prevout] = SweepInfo(name='our_ctx_to_local',
csv_delay=to_self_delay,
cltv_expiry=0,
gen_tx=sweep_tx)
def create_txns_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
ctx_output_idx: int, htlc_relative_idx: int):
if htlc_direction == RECEIVED:
try:
preimage = chan.lnworker.get_preimage(htlc.payment_hash)
except UnknownPaymentHash as e:
_logger.info(f'trying to sweep htlc from our latest ctx but getting {repr(e)}')
return
else:
preimage = None
htlctx_witness_script, htlc_tx = create_htlctx_that_spends_from_our_ctx(
chan=chan,
our_pcp=our_pcp,
ctx=ctx,
htlc=htlc,
local_htlc_privkey=our_htlc_privkey,
preimage=preimage,
htlc_direction=htlc_direction,
ctx_output_idx=ctx_output_idx,
htlc_relative_idx=htlc_relative_idx)
sweep_tx = lambda: create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
to_self_delay=to_self_delay,
htlc_tx=htlc_tx,
htlctx_witness_script=htlctx_witness_script,
sweep_address=sweep_address,
privkey=our_localdelayed_privkey.get_secret_bytes(),
is_revocation=False,
config=chan.lnworker.config)
txs[htlc_tx.inputs()[0].prevout.to_str()] = SweepInfo(name='first-stage-htlc',
csv_delay=0,
cltv_expiry=htlc_tx.locktime,
gen_tx=lambda: htlc_tx)
txs[htlc_tx.txid() + ':0'] = SweepInfo(name='second-stage-htlc',
csv_delay=to_self_delay,
cltv_expiry=0,
gen_tx=sweep_tx)
htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
ctx=ctx,
pcp=our_pcp,
subject=LOCAL,
ctn=ctn)
for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
create_txns_for_htlc(htlc=htlc,
htlc_direction=direction,
ctx_output_idx=ctx_output_idx,
htlc_relative_idx=htlc_relative_idx)
return txs
def analyze_ctx(chan: 'Channel', ctx: Transaction) -> Optional[Tuple[int, bytes, bool, Optional[bytes]]]:
    """Classify a broadcast REMOTE commitment tx ``ctx`` for channel ``chan``.

    Returns ``(ctn, their_pcp, is_revocation, per_commitment_secret)`` where
    ``their_pcp`` is the remote per-commitment point for commitment number
    ``ctn``, or None if the point cannot be determined.
    ``per_commitment_secret`` is only non-None for revoked (breach) ctxs.
    """
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    # Commitment number is encoded obfuscated in the tx itself.
    ctn = extract_ctn_from_tx_and_chan(ctx, chan)
    per_commitment_secret = None
    oldest_unrevoked_remote_ctn = chan.get_oldest_unrevoked_ctn(REMOTE)
    if ctn == oldest_unrevoked_remote_ctn:
        # Their latest unrevoked commitment: use the current point.
        their_pcp = their_conf.current_per_commitment_point
        is_revocation = False
    elif ctn == oldest_unrevoked_remote_ctn + 1:
        # Their *next* commitment (they may broadcast it before revoking the old one).
        their_pcp = their_conf.next_per_commitment_point
        is_revocation = False
    elif ctn < oldest_unrevoked_remote_ctn:
        # A revoked (old) commitment: we should hold its secret -> breach remedy.
        try:
            per_commitment_secret = chan.revocation_store.retrieve_secret(RevocationStore.START_INDEX - ctn)
        except UnableToDeriveSecret:
            return
        their_pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
        is_revocation = True
    elif ctn in chan.data_loss_protect_remote_pcp:
        # Future ctn whose point we learned via option_data_loss_protect
        # (i.e. we lost channel state at some point).
        their_pcp = chan.data_loss_protect_remote_pcp[ctn]
        is_revocation = False
    else:
        return
    return ctn, their_pcp, is_revocation, per_commitment_secret
def create_sweeptxs_for_their_ctx(*, chan: 'Channel', ctx: Transaction,
                                  sweep_address: str) -> Optional[Dict[str,SweepInfo]]:
    """Presign sweep transactions for outputs of THEIR commitment tx ``ctx``.

    Returns a map of prevout string ("txid:n") -> SweepInfo covering:
    - the to_local output if ``ctx`` is revoked (breach remedy via revocation key),
    - our to_remote output (p2wpkh to our payment key),
    - each HTLC output we can claim (preimage / timeout, or revocation key).
    Returns None if ``ctx`` cannot be classified or contains none of our outputs.
    """
    txs = {}
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    x = analyze_ctx(chan, ctx)
    if not x:
        return
    ctn, their_pcp, is_revocation, per_commitment_secret = x
    # Reconstruct the scripts/addresses of their commitment outputs so we can
    # recognise them among the tx outputs.
    our_revocation_pubkey = derive_blinded_pubkey(our_conf.revocation_basepoint.pubkey, their_pcp)
    their_delayed_pubkey = derive_pubkey(their_conf.delayed_basepoint.pubkey, their_pcp)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        our_revocation_pubkey, our_conf.to_self_delay, their_delayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', witness_script)
    our_payment_pubkey = derive_pubkey(our_conf.payment_basepoint.pubkey, their_pcp)
    to_remote_address = make_commitment_output_to_remote_address(our_payment_pubkey)
    _logger.debug(f'testing their ctx: {to_local_address} {to_remote_address}')
    # If neither expected output is present, the derived point must be wrong
    # (or there is nothing for us to sweep).
    if not ctx.get_output_idxs_from_address(to_local_address) \
            and not ctx.get_output_idxs_from_address(to_remote_address):
        return
    if is_revocation:
        # Breach: derive the full revocation key and queue the punishment
        # sweep of their to_local output.
        our_revocation_privkey = derive_blinded_privkey(our_conf.revocation_basepoint.privkey, per_commitment_secret)
        gen_tx = create_sweeptx_for_their_revoked_ctx(chan, ctx, per_commitment_secret, chan.sweep_address)
        if gen_tx:
            tx = gen_tx()
            txs[tx.inputs()[0].prevout.to_str()] = SweepInfo(name='to_local_for_revoked_ctx',
                                                             csv_delay=0,
                                                             cltv_expiry=0,
                                                             gen_tx=gen_tx)
    # Keys needed to spend HTLC outputs and our to_remote output.
    our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'), per_commitment_point=their_pcp)
    our_htlc_privkey = ecc.ECPrivkey.from_secret_scalar(our_htlc_privkey)
    their_htlc_pubkey = derive_pubkey(their_conf.htlc_basepoint.pubkey, their_pcp)
    our_payment_bp_privkey = ecc.ECPrivkey(our_conf.payment_basepoint.privkey)
    our_payment_privkey = derive_privkey(our_payment_bp_privkey.secret_scalar, their_pcp)
    our_payment_privkey = ecc.ECPrivkey.from_secret_scalar(our_payment_privkey)
    # Sanity check: derived privkey must match the pubkey used in the address.
    assert our_payment_pubkey == our_payment_privkey.get_public_key_bytes(compressed=True)
    output_idxs = ctx.get_output_idxs_from_address(to_remote_address)
    if output_idxs:
        # Our to_remote output: immediately spendable p2wpkh.
        output_idx = output_idxs.pop()
        prevout = ctx.txid() + ':%d'%output_idx
        sweep_tx = lambda: create_sweeptx_their_ctx_to_remote(
            sweep_address=sweep_address,
            ctx=ctx,
            output_idx=output_idx,
            our_payment_privkey=our_payment_privkey,
            config=chan.lnworker.config)
        txs[prevout] = SweepInfo(name='their_ctx_to_remote',
                                 csv_delay=0,
                                 cltv_expiry=0,
                                 gen_tx=sweep_tx)
    def create_sweeptx_for_htlc(htlc: 'UpdateAddHtlc', is_received_htlc: bool,
                                ctx_output_idx: int) -> None:
        # Queue a sweep for one HTLC output of their ctx into the enclosing
        # ``txs`` dict.  ``is_received_htlc`` is from THEIR point of view.
        if not is_received_htlc and not is_revocation:
            # Claiming an HTLC they offered to us requires the preimage.
            try:
                preimage = chan.lnworker.get_preimage(htlc.payment_hash)
            except UnknownPaymentHash as e:
                _logger.info(f'trying to sweep htlc from their latest ctx but getting {repr(e)}')
                return
        else:
            preimage = None
        htlc_output_witness_script = make_htlc_output_witness_script(
            is_received_htlc=is_received_htlc,
            remote_revocation_pubkey=our_revocation_pubkey,
            remote_htlc_pubkey=our_htlc_privkey.get_public_key_bytes(compressed=True),
            local_htlc_pubkey=their_htlc_pubkey,
            payment_hash=htlc.payment_hash,
            cltv_expiry=htlc.cltv_expiry)
        # Timeout path (HTLC they received from us) is CLTV-locked;
        # preimage/revocation claims are not.
        cltv_expiry = htlc.cltv_expiry if is_received_htlc and not is_revocation else 0
        prevout = ctx.txid() + ':%d'%ctx_output_idx
        sweep_tx = lambda: create_sweeptx_their_ctx_htlc(
            ctx=ctx,
            witness_script=htlc_output_witness_script,
            sweep_address=sweep_address,
            preimage=preimage,
            output_idx=ctx_output_idx,
            privkey=our_revocation_privkey if is_revocation else our_htlc_privkey.get_secret_bytes(),
            is_revocation=is_revocation,
            cltv_expiry=cltv_expiry,
            config=chan.lnworker.config)
        txs[prevout] = SweepInfo(name=f'their_ctx_htlc_{ctx_output_idx}',
                                 csv_delay=0,
                                 cltv_expiry=cltv_expiry,
                                 gen_tx=sweep_tx)
    htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
                                                              ctx=ctx,
                                                              pcp=their_pcp,
                                                              subject=REMOTE,
                                                              ctn=ctn)
    for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
        create_sweeptx_for_htlc(htlc=htlc,
                                is_received_htlc=direction == RECEIVED,
                                ctx_output_idx=ctx_output_idx)
    return txs
def create_htlctx_that_spends_from_our_ctx(chan: 'Channel', our_pcp: bytes,
                                           ctx: Transaction, htlc: 'UpdateAddHtlc',
                                           local_htlc_privkey: bytes, preimage: Optional[bytes],
                                           htlc_direction: Direction, htlc_relative_idx: int,
                                           ctx_output_idx: int) -> Tuple[bytes, Transaction]:
    """Build and fully sign the pre-signed HTLC-tx spending an HTLC output of OUR ctx.

    Combines our freshly-made local HTLC signature with the remote signature
    the peer previously gave us (stored in the channel), plus the payment
    preimage for received HTLCs.  Returns ``(witness_script, signed htlc_tx)``.
    """
    # A received HTLC can only be claimed with the preimage; an offered one
    # (timeout path) must not carry one.
    assert (htlc_direction == RECEIVED) == bool(preimage), 'preimage is required iff htlc is received'
    preimage = preimage or b''
    witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
                                                            pcp=our_pcp,
                                                            subject=LOCAL,
                                                            htlc_direction=htlc_direction,
                                                            commit=ctx,
                                                            htlc=htlc,
                                                            ctx_output_idx=ctx_output_idx,
                                                            name=f'our_ctx_{ctx_output_idx}_htlc_tx_{bh2u(htlc.payment_hash)}')
    # 2-of-2: the remote signature was exchanged at commitment time.
    remote_htlc_sig = chan.get_remote_htlc_sig_for_htlc(htlc_relative_idx=htlc_relative_idx)
    local_htlc_sig = bfh(htlc_tx.sign_txin(0, local_htlc_privkey))
    txin = htlc_tx.inputs()[0]
    witness_program = bfh(Transaction.get_preimage_script(txin))
    txin.witness = make_htlc_tx_witness(remote_htlc_sig, local_htlc_sig, preimage, witness_program)
    return witness_script, htlc_tx
def create_sweeptx_their_ctx_htlc(ctx: Transaction, witness_script: bytes, sweep_address: str,
                                  preimage: Optional[bytes], output_idx: int,
                                  privkey: bytes, is_revocation: bool,
                                  cltv_expiry: int, config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep one HTLC output of THEIR commitment tx directly to ``sweep_address``.

    ``privkey`` is either our HTLC key (normal claim; witness carries
    ``preimage``) or the revocation key (``is_revocation``; witness carries
    the revocation pubkey instead).  The sweep's nLockTime is set to
    ``cltv_expiry`` (0 for non-timeout claims).  Returns None if the output,
    minus fees, would be dust.
    """
    assert type(cltv_expiry) is int
    preimage = preimage or b''  # preimage is required iff (not is_revocation and htlc is offered)
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.witness_script = witness_script
    txin.script_sig = b''
    sweep_inputs = [txin]
    # Rough size estimate used only for fee calculation.
    tx_size_bytes = 200
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2, locktime=cltv_expiry)
    sig = bfh(tx.sign_txin(0, privkey))
    if not is_revocation:
        witness = construct_witness([sig, preimage, witness_script])
    else:
        # Revocation path: the script expects the revocation pubkey where the
        # preimage would otherwise go.
        revocation_pubkey = privkey_to_pubkey(privkey)
        witness = construct_witness([sig, revocation_pubkey, witness_script])
    tx.inputs()[0].witness = bfh(witness)
    assert tx.is_complete()
    return tx
def create_sweeptx_their_ctx_to_remote(sweep_address: str, ctx: Transaction, output_idx: int,
                                       our_payment_privkey: ecc.ECPrivkey,
                                       config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep our p2wpkh to_remote output of THEIR commitment tx.

    This output pays directly to our payment key, so no script delay applies.
    Returns None if the value after fees would be dust; raises if signing
    does not complete.
    """
    our_payment_pubkey = our_payment_privkey.get_public_key_hex(compressed=True)
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.script_type = 'p2wpkh'
    txin.pubkeys = [bfh(our_payment_pubkey)]
    txin.num_sig = 1
    sweep_inputs = [txin]
    # Rough size estimate (single p2wpkh in, single out) for fee calculation.
    tx_size_bytes = 110
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    sweep_tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs)
    sweep_tx.set_rbf(True)
    sweep_tx.sign({our_payment_pubkey: (our_payment_privkey.get_secret_bytes(), True)})
    if not sweep_tx.is_complete():
        raise Exception('channel close sweep tx is not complete')
    return sweep_tx
def create_sweeptx_ctx_to_local(*, sweep_address: str, ctx: Transaction, output_idx: int, witness_script: str,
                                privkey: bytes, is_revocation: bool, config: SimpleConfig,
                                to_self_delay: int=None) -> Optional[PartialTransaction]:
    """Sweep a to_local commitment output to ``sweep_address``.

    For the non-revocation path the output is CSV-locked: ``to_self_delay``
    is required and written into the input's nSequence.  For the revocation
    path no delay applies.  Returns None if the value after fees is dust.
    """
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.script_sig = b''
    txin.witness_script = bfh(witness_script)
    sweep_inputs = [txin]
    if not is_revocation:
        assert isinstance(to_self_delay, int)
        # Enforce the OP_CHECKSEQUENCEVERIFY branch of the script.
        sweep_inputs[0].nsequence = to_self_delay
    # Rough size estimate used only for fee calculation.
    tx_size_bytes = 121
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold():
        return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    sweep_tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2)
    # NOTE(review): unlike the sibling sweep helpers, the signature here is not
    # wrapped in bfh() -- presumably construct_witness accepts hex strings; confirm.
    sig = sweep_tx.sign_txin(0, privkey)
    # int(is_revocation) selects the script branch (1 = revocation, 0 = CSV).
    witness = construct_witness([sig, int(is_revocation), witness_script])
    sweep_tx.inputs()[0].witness = bfh(witness)
    return sweep_tx
def create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(*,
        htlc_tx: Transaction, htlctx_witness_script: bytes, sweep_address: str,
        privkey: bytes, is_revocation: bool, to_self_delay: int,
        config: SimpleConfig) -> Optional[PartialTransaction]:
    """Second-stage sweep: spend output 0 of an HTLC-tx to ``sweep_address``.

    The HTLC-tx output is to_self_delay-encumbered on the non-revocation
    path, so nSequence is set accordingly.  Returns None if the value after
    fees would be dust.
    """
    val = htlc_tx.outputs()[0].value
    prevout = TxOutpoint(txid=bfh(htlc_tx.txid()), out_idx=0)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.script_sig = b''
    txin.witness_script = htlctx_witness_script
    sweep_inputs = [txin]
    if not is_revocation:
        assert isinstance(to_self_delay, int)
        # Enforce the CSV branch of the HTLC-tx output script.
        sweep_inputs[0].nsequence = to_self_delay
    # Rough size estimate used only for fee calculation.
    tx_size_bytes = 200
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2)
    sig = bfh(tx.sign_txin(0, privkey))
    # int(is_revocation) selects the script branch (1 = revocation, 0 = CSV).
    witness = construct_witness([sig, int(is_revocation), htlctx_witness_script])
    tx.inputs()[0].witness = bfh(witness)
    assert tx.is_complete()
    return tx
| true | true |
f71ae1ea2aec311b4f7e6cd58d35f326af88dcb8 | 892 | py | Python | python/projeto02/meusite/urls.py | WilliamDeveloper/udemy_cursos | f592bafbe3d2a5d631458f8c42151c880aadef17 | [
"MIT"
] | null | null | null | python/projeto02/meusite/urls.py | WilliamDeveloper/udemy_cursos | f592bafbe3d2a5d631458f8c42151c880aadef17 | [
"MIT"
] | null | null | null | python/projeto02/meusite/urls.py | WilliamDeveloper/udemy_cursos | f592bafbe3d2a5d631458f8c42151c880aadef17 | [
"MIT"
] | null | null | null | """meusite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from home import views
# Root URL table; incoming paths are matched against these patterns in order.
urlpatterns = [
    path('', views.index),  # site home page, served by the `home` app
    path('admin/', admin.site.urls),  # built-in Django admin interface
    path('blog/', include('blog.urls')),  # delegate /blog/... to the blog app's URLconf
    path('sobre/', include('sobre.urls')),  # delegate /sobre/... ("about") to its app
]
| 34.307692 | 77 | 0.693946 | from django.contrib import admin
from django.urls import path, include
from home import views
urlpatterns = [
path('', views.index),
path('admin/', admin.site.urls),
path('blog/', include('blog.urls')),
path('sobre/', include('sobre.urls')),
]
| true | true |
f71ae28487a6c137bf0a9c98196c4d1383a39139 | 489 | py | Python | nnwordembed.py | GLaDO8/pytorch_playground | 3623de18881a37ce413c92d8a63ea9ba1cc401a5 | [
"MIT"
] | 2 | 2019-02-06T18:07:47.000Z | 2020-08-12T21:56:50.000Z | nnwordembed.py | GLaDO8/pytorch_playground | 3623de18881a37ce413c92d8a63ea9ba1cc401a5 | [
"MIT"
] | null | null | null | nnwordembed.py | GLaDO8/pytorch_playground | 3623de18881a37ce413c92d8a63ea9ba1cc401a5 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Seed the RNG so the randomly initialised embedding weights are reproducible.
torch.manual_seed(1)

# Toy vocabulary: each word maps to a row index of the embedding table.
word_to_ix = {"hello": 0, "world": 1}

# A learnable lookup table with one 5-dimensional vector per vocabulary word
# (nn.Embedding(num_embeddings, embedding_dim)).
embeds = nn.Embedding(len(word_to_ix), 5)

# Indices of the words whose vectors we want, as a LongTensor.
indices = [word_to_ix[w] for w in ("hello", "world")]
lookup_tensor = torch.tensor(indices, dtype=torch.long)
hello_embed = embeds(lookup_tensor)
print(hello_embed) | 37.615385 | 113 | 0.766871 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
word_to_ix = {"hello": 0, "world": 1}
embeds = nn.Embedding(2, 5)
lookup_tensor = torch.tensor([word_to_ix["hello"], word_to_ix["world"]], dtype=torch.long)
hello_embed = embeds(lookup_tensor)
print(hello_embed) | true | true |
f71ae39173f45a9863447511633a81e7b7687552 | 1,529 | py | Python | Admin-Scripts/Video2.py | vijayshankarrealdeal/Java | 2dff1a79c91782bf2aeb1bee057b19c41cafd2a1 | [
"MIT"
] | 3 | 2021-03-07T16:29:35.000Z | 2021-03-22T07:41:04.000Z | Admin-Scripts/Video2.py | vijayshankarrealdeal/Java | 2dff1a79c91782bf2aeb1bee057b19c41cafd2a1 | [
"MIT"
] | null | null | null | Admin-Scripts/Video2.py | vijayshankarrealdeal/Java | 2dff1a79c91782bf2aeb1bee057b19c41cafd2a1 | [
"MIT"
] | 2 | 2021-03-08T06:12:52.000Z | 2021-03-14T05:01:19.000Z | import firebase_admin
from firebase_admin import credentials,firestore
from firebase_admin import storage
# Initialise the Firebase Admin SDK from a local service-account key file;
# the storageBucket option points at the project's Cloud Storage bucket.
cred = credentials.Certificate("./adminKey.json")
firebase_admin.initialize_app(cred, {
    'storageBucket': 'women-e598c.appspot.com'
})
# Firestore client used for the single write below.
db = firestore.client()
# Hard-coded metadata for the one video document this admin script uploads.
title = "GenderEquality"
cloudStorageLink = "https://firebasestorage.googleapis.com/v0/b/women-e598c.appspot.com/o/y2mate.com%20-%20Melinda%20Gates%20On%20Marriage%20Gender%20Equality%20%20Solving%20Tough%20Problems.mp4?alt=media&token=82126c96-8141-4634-97ae-85a4725913b5"
name = "Business Insider"
source = "YouTube"
sourceLink = "https://www.youtube.com/watch?v=BuYfALzDPrY"
discription = "Gender equality is key. That means having balanced relationships where both partners split the workload at home. This is something that even Melinda and Bill have had to work at. Gates details all these findings in her new book, The Moment of Lift."
# NOTE(review): the commas make this a 3-tuple (648, 568, 59947), not a single
# number -- presumably a comma-written view count; confirm the intended value.
viewsOnVideo = 648,568,59947
socialHandle = " https://read.bi/2xCnzGF"
webpage = "https://www.businessinsider.com"
# Only write the document when the required fields are non-empty; a new
# auto-ID document is created under adminContent/Videos/data.
if(len(title)!=0 and len(cloudStorageLink)!=0):
    videsoWrite = db.collection("adminContent").document("Videos").collection("data").document().set({
        "title":title,
        "name":name,
        "source":source,
        "sourceLink":sourceLink,
        "discription":discription,
        "viewsOnVideo":viewsOnVideo,
        "socialHandle":socialHandle,
        "webpage":webpage,
        "cloudStorageLink":cloudStorageLink
    })
else:
    print("Error")
| 40.236842 | 264 | 0.737083 | import firebase_admin
from firebase_admin import credentials,firestore
from firebase_admin import storage
cred = credentials.Certificate("./adminKey.json")
firebase_admin.initialize_app(cred, {
'storageBucket': 'women-e598c.appspot.com'
})
db = firestore.client()
title = "GenderEquality"
cloudStorageLink = "https://firebasestorage.googleapis.com/v0/b/women-e598c.appspot.com/o/y2mate.com%20-%20Melinda%20Gates%20On%20Marriage%20Gender%20Equality%20%20Solving%20Tough%20Problems.mp4?alt=media&token=82126c96-8141-4634-97ae-85a4725913b5"
name = "Business Insider"
source = "YouTube"
sourceLink = "https://www.youtube.com/watch?v=BuYfALzDPrY"
discription = "Gender equality is key. That means having balanced relationships where both partners split the workload at home. This is something that even Melinda and Bill have had to work at. Gates details all these findings in her new book, The Moment of Lift."
viewsOnVideo = 648,568,59947
socialHandle = " https://read.bi/2xCnzGF"
webpage = "https://www.businessinsider.com"
if(len(title)!=0 and len(cloudStorageLink)!=0):
videsoWrite = db.collection("adminContent").document("Videos").collection("data").document().set({
"title":title,
"name":name,
"source":source,
"sourceLink":sourceLink,
"discription":discription,
"viewsOnVideo":viewsOnVideo,
"socialHandle":socialHandle,
"webpage":webpage,
"cloudStorageLink":cloudStorageLink
})
else:
print("Error")
| true | true |
f71ae3b9a7a99ffd6aec250a1fe54db87c9201ae | 15,356 | py | Python | src/python/analyzer_executor/src/analyzer_executor_lib/analyzer_executor.py | inickles/grapl | f906aba74b2249c9c7d7b1afe6fc540551cdee8b | [
"Apache-2.0"
] | 313 | 2018-10-15T05:58:39.000Z | 2020-04-21T20:31:39.000Z | src/python/analyzer_executor/src/analyzer_executor_lib/analyzer_executor.py | graplsec/grapl | 68386b425c8e9e34f7380a078279b67b316fe2a0 | [
"Apache-2.0"
] | 33 | 2018-10-16T00:47:10.000Z | 2020-03-16T22:32:45.000Z | src/python/analyzer_executor/src/analyzer_executor_lib/analyzer_executor.py | graplsec/grapl | 68386b425c8e9e34f7380a078279b67b316fe2a0 | [
"Apache-2.0"
] | 29 | 2018-11-18T08:39:14.000Z | 2020-04-09T20:59:15.000Z | from __future__ import annotations
import base64
import hashlib
import inspect
import json
import os
import sys
import traceback
from collections import defaultdict
from logging import Logger
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
cast,
)
import boto3
import grapl_analyzerlib.counters # noqa: F401
from analyzer_executor_lib.redis_cache import EitherCache, construct_redis_client
from grapl_analyzerlib.analyzer import Analyzer
from grapl_analyzerlib.execution import ExecutionComplete, ExecutionFailed, ExecutionHit
from grapl_analyzerlib.grapl_client import GraphClient
from grapl_analyzerlib.nodes.base import BaseView
from grapl_analyzerlib.plugin_retriever import load_plugins
from grapl_analyzerlib.queryable import Queryable
from grapl_analyzerlib.subgraph_view import SubgraphView
from grapl_common.env_helpers import S3ResourceFactory
from grapl_common.grapl_logger import get_module_grapl_logger
from grapl_common.metrics.metric_reporter import MetricReporter, TagPair
from grapl_common.sqs.sqs_types import S3PutRecordDict, SQSMessageBody
from python_proto.pipeline import Metadata, OldEnvelope
if TYPE_CHECKING:
from mypy_boto3_s3 import S3ServiceResource
# Module-level logger, shared by everything in this file (including the
# free functions and static methods).
LOGGER = get_module_grapl_logger()

# Directory that analyzer model plugins are downloaded into; it is put on
# sys.path so downloaded plugin modules can be imported directly.
MODEL_PLUGINS_DIR = os.getenv("MODEL_PLUGINS_DIR", "/tmp")
sys.path.insert(0, MODEL_PLUGINS_DIR)

# Ensure the plugins directory exists.  Failure is logged but not fatal:
# plugin download will fail later with its own error.
try:
    directory = Path(MODEL_PLUGINS_DIR) / "model_plugins"
    directory.mkdir(parents=True, exist_ok=True)
except Exception as e:
    # Bug fix: the original passed `e` as a bare logging argument with no %s
    # placeholder in the message, which makes stdlib logging raise an internal
    # formatting error and lose the exception detail.
    LOGGER.error("Failed to create model plugins directory: %s", e)
def verbose_cast_to_int(input: Optional[str]) -> Optional[int]:
    """Convert an environment-variable string to an int.

    ``None`` or an empty string yields ``None``; a non-numeric string raises
    ``ValueError`` with a message naming the offending value.
    """
    if input:
        try:
            return int(input)
        except (TypeError, ValueError):
            raise ValueError(f"Couldn't cast this env variable into an int: {input}")
    return None
class AnalyzerExecutor:
    """Downloads analyzer plugins from S3 and executes them against subgraphs.

    For each SQS record the analyzer source is fetched from the analyzers
    bucket, exec()'d in a child process against the record's subgraph, and
    every resulting ExecutionHit is uploaded to the matched-subgraphs bucket.
    Redis-backed caches suppress duplicate work (already-processed messages
    and already-matched nodes).
    """
    # constants
    CHUNK_SIZE_RETRY: int = 10  # smaller node chunks when re-driving failed messages
    CHUNK_SIZE_DEFAULT: int = 100
    def __init__(
        self,
        model_plugins_bucket: str,
        analyzers_bucket: str,
        analyzer_matched_subgraphs_bucket: str,
        message_cache: EitherCache,
        hit_cache: EitherCache,
        chunk_size: int,
        logger: Logger,
        metric_reporter: MetricReporter,
    ) -> None:
        # All collaborators are injected; from_env() builds the production
        # wiring from environment variables.
        self.model_plugins_bucket = model_plugins_bucket
        self.analyzers_bucket = analyzers_bucket
        self.analyzer_matched_subgraphs_bucket = analyzer_matched_subgraphs_bucket
        self.message_cache = message_cache
        self.hit_cache = hit_cache
        self.chunk_size = chunk_size
        self.logger = logger
        self.metric_reporter = metric_reporter
    @classmethod
    def from_env(cls, env: Optional[Mapping[str, str]] = None) -> AnalyzerExecutor:
        """Build an AnalyzerExecutor from environment variables.

        Reads the S3 bucket names, the message/hit Redis cache endpoints,
        and the IS_RETRY flag (which shrinks the per-query node chunk size).
        """
        env = env or os.environ
        # If we're retrying, change the chunk size
        # NOTE(review): any non-empty string (including "false") is truthy
        # here, so IS_RETRY="false" still enables retry chunking -- confirm.
        is_retry = bool(env.get("IS_RETRY", False))
        if is_retry:
            chunk_size = cls.CHUNK_SIZE_RETRY
        else:
            chunk_size = cls.CHUNK_SIZE_DEFAULT
        # Set up message cache
        messagecache_addr = env.get("MESSAGECACHE_ADDR")
        messagecache_port: Optional[int] = verbose_cast_to_int(
            env.get("MESSAGECACHE_PORT")
        )
        message_cache = construct_redis_client(messagecache_addr, messagecache_port)
        # Set up hit cache
        hitcache_addr = env.get("HITCACHE_ADDR")
        hitcache_port: Optional[int] = verbose_cast_to_int(env.get("HITCACHE_PORT"))
        hit_cache = construct_redis_client(hitcache_addr, hitcache_port)
        metric_reporter = MetricReporter.create("analyzer-executor")
        model_plugins_bucket = env["GRAPL_MODEL_PLUGINS_BUCKET"]
        analyzers_bucket = env["GRAPL_ANALYZERS_BUCKET"]
        analyzer_matched_subgraphs_bucket = env[
            "GRAPL_ANALYZER_MATCHED_SUBGRAPHS_BUCKET"
        ]
        return AnalyzerExecutor(
            model_plugins_bucket=model_plugins_bucket,
            analyzers_bucket=analyzers_bucket,
            analyzer_matched_subgraphs_bucket=analyzer_matched_subgraphs_bucket,
            message_cache=message_cache,
            hit_cache=hit_cache,
            chunk_size=chunk_size,
            logger=LOGGER,
            metric_reporter=metric_reporter,
        )
    def check_caches(
        self, file_hash: str, msg_id: str, node_key: str, analyzer_name: str
    ) -> bool:
        """Return True if this (analyzer, node, message) combination can be skipped.

        True when either the message cache says it was already processed or
        the hit cache says this analyzer already matched this node.
        """
        with self.metric_reporter.histogram_ctx("analyzer-executor.check_caches"):
            if self.check_msg_cache(file_hash, node_key, msg_id):
                self.logger.debug("cache hit - already processed")
                return True
            if self.check_hit_cache(analyzer_name, node_key):
                self.logger.debug("cache hit - already matched")
                return True
            return False
    def to_event_hash(self, components: Iterable[str]) -> str:
        """SHA-256 hex digest of the comma-joined components; used as a cache key."""
        joined = ",".join(components)
        event_hash = hashlib.sha256(joined.encode()).hexdigest()
        return event_hash
    def check_msg_cache(self, file: str, node_key: str, msg_id: str) -> bool:
        """True if (file, node_key, msg_id) was already marked processed."""
        event_hash = self.to_event_hash((file, node_key, msg_id))
        return bool(self.message_cache.get(event_hash))
    def update_msg_cache(self, file: str, node_key: str, msg_id: str) -> None:
        """Mark (file, node_key, msg_id) as processed."""
        event_hash = self.to_event_hash((file, node_key, msg_id))
        self.message_cache.set(event_hash, "1")
    def check_hit_cache(self, file: str, node_key: str) -> bool:
        """True if this analyzer (`file`) already produced a hit on node_key."""
        event_hash = self.to_event_hash((file, node_key))
        return bool(self.hit_cache.get(event_hash))
    def update_hit_cache(self, file: str, node_key: str) -> None:
        """Record that this analyzer (`file`) produced a hit on node_key."""
        event_hash = self.to_event_hash((file, node_key))
        self.hit_cache.set(event_hash, "1")
    async def handle_events(self, events: SQSMessageBody, context: Any) -> None:
        """Process a batch of SQS records.

        For each record: download the named analyzer from S3, run it against
        the record's subgraph in a child process, and emit every hit to the
        matched-subgraphs bucket, updating both caches.
        """
        # Parse sns message
        self.logger.debug(f"handling events: {events} context: {context}")
        client = GraphClient()
        s3 = S3ResourceFactory(boto3).from_env()
        # Make model-plugin modules importable before analyzer code runs.
        load_plugins(
            self.model_plugins_bucket,
            s3.meta.client,
            os.path.abspath(MODEL_PLUGINS_DIR),
        )
        for event in events["Records"]:
            data = parse_s3_event(s3, event)
            # FIXME: this code assumes inner_message is json
            envelope = OldEnvelope.deserialize(data)
            message = json.loads(envelope.inner_message)
            LOGGER.info(f'Executing Analyzer: {message["key"]}')
            with self.metric_reporter.histogram_ctx(
                "analyzer-executor.download_s3_file"
            ):
                analyzer = download_s3_file(
                    s3,
                    self.analyzers_bucket,
                    message["key"],
                ).decode("utf8")
            # Analyzer name is the second-to-last component of the S3 key path.
            analyzer_name = message["key"].split("/")[-2]
            subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))
            # TODO: Validate signature of S3 file
            LOGGER.info(f"event {event} {envelope.metadata}")
            # Run the analyzer in a child process; hits come back over a pipe.
            rx: Connection
            tx: Connection
            rx, tx = Pipe(duplex=False)
            p = Process(
                target=self.execute_file,
                args=(analyzer_name, analyzer, subgraph, tx, "", self.chunk_size),
            )
            p.start()
            for exec_hit in self.poll_process(rx=rx, analyzer_name=analyzer_name):
                with self.metric_reporter.histogram_ctx(
                    "analyzer-executor.emit_event.ms",
                    (TagPair("analyzer_name", exec_hit.analyzer_name),),
                ):
                    emit_event(
                        self.analyzer_matched_subgraphs_bucket,
                        s3,
                        exec_hit,
                        envelope.metadata,
                    )
                # NOTE(review): the child was started with msg_id="" (see the
                # Process args above) so exec_analyzers checks the msg cache
                # with an empty msg_id, while this update keys on
                # message["key"] -- the two never match; confirm intent.
                self.update_msg_cache(analyzer, exec_hit.root_node_key, message["key"])
                self.update_hit_cache(analyzer_name, exec_hit.root_node_key)
            p.join()
    def poll_process(
        self,
        rx: Connection,
        analyzer_name: str,
    ) -> Iterator[ExecutionHit]:
        """
        Keep polling the spawned Process, and yield any ExecutionHits.
        (This will probably disappear if Analyzers move to Docker images.)
        """
        t = 0
        while True:
            # 5-second poll timeout so we can log liveness while waiting.
            p_res = rx.poll(timeout=5)
            if not p_res:
                t += 1
                LOGGER.info(
                    f"Analyzer {analyzer_name} polled for for {t * 5} seconds without result"
                )
                continue
            result: Optional[Any] = rx.recv()
            if isinstance(result, ExecutionComplete):
                self.logger.info(f"Analyzer {analyzer_name} execution complete")
                return
            # emit any hits to an S3 bucket
            if isinstance(result, ExecutionHit):
                self.logger.info(
                    f"Analyzer {analyzer_name} emitting event for:"
                    f"{result.analyzer_name} {result.root_node_key}"
                )
                yield result
            assert not isinstance(
                result, ExecutionFailed
            ), f"Analyzer {analyzer_name} failed."
    def exec_analyzers(
        self,
        dg_client: GraphClient,
        file: str,
        msg_id: str,
        nodes: List[BaseView],
        analyzers: Dict[str, Analyzer],
        sender: Any,
    ) -> None:
        """Run every analyzer's queries against every node.

        For each node, uncached (analyzer, node) pairs are queried anchored
        at that node's node_key; on a hit, the analyzer's on_response() is
        invoked with `sender` so it can report ExecutionHits.
        """
        if not analyzers:
            self.logger.warning("Received empty dict of analyzers")
            return
        if not nodes:
            self.logger.warning("Received empty array of nodes")
        for node in nodes:
            # Collect each analyzer's queries for this node, skipping cached pairs.
            querymap: Dict[str, List[Queryable]] = defaultdict(list)
            for an_name, analyzer in analyzers.items():
                if self.check_caches(file, msg_id, node.node_key, an_name):
                    continue
                queries = analyzer.get_queries()
                # get_queries() may return one Queryable or a list/tuple of them.
                if isinstance(queries, list) or isinstance(queries, tuple):
                    querymap[an_name].extend(queries)
                else:
                    querymap[an_name].append(queries)
            for an_name, queries in querymap.items():
                analyzer = analyzers[an_name]
                for query in queries:
                    # TODO: Whether it was a hit or not is a good Tag
                    tags = (TagPair("analyzer_name", an_name),)
                    with self.metric_reporter.histogram_ctx(
                        "analyzer-executor.query_first.ms", tags
                    ):
                        response = query.query_first(
                            dg_client, contains_node_key=node.node_key
                        )
                    if response:
                        self.logger.debug(
                            f"Analyzer '{an_name}' received a hit, executing on_response()"
                        )
                        with self.metric_reporter.histogram_ctx(
                            "analyzer-executor.on_response.ms", tags
                        ):
                            analyzer.on_response(response, sender)
    def execute_file(
        self,
        name: str,
        file: str,
        graph: SubgraphView,
        sender: Connection,
        msg_id: str,
        chunk_size: int,
    ) -> None:
        """Child-process entry point: run analyzer source `file` over `graph`.

        The source is exec()'d into this module's globals so that
        get_analyzer_objects() can discover the Analyzer classes it defines.
        Nodes are processed in chunks on a 4-thread pool; completion or
        failure is reported back over the `sender` pipe.
        """
        try:
            pool = ThreadPool(processes=4)
            # NOTE: exec() of downloaded code -- analyzer sources are assumed
            # to be trusted (operator-uploaded).
            exec(file, globals())
            client = GraphClient()
            analyzers = get_analyzer_objects(client)
            if not analyzers:
                self.logger.warning(f"Got no analyzers for file: {name}")
            self.logger.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")
            for nodes in chunker([n for n in graph.node_iter()], chunk_size):
                self.logger.info(f"Querying {len(nodes)} nodes")
                def exec_analyzer(
                    nodes: List[BaseView], sender: Connection
                ) -> List[BaseView]:
                    # Worker body: run all analyzers over one chunk; on error,
                    # notify the parent before re-raising.
                    try:
                        self.exec_analyzers(
                            client, file, msg_id, nodes, analyzers, sender
                        )
                        return nodes
                    except Exception as e:
                        self.logger.error(traceback.format_exc())
                        self.logger.error(
                            f"Execution of {name} failed with {e} {e.args}"
                        )
                        sender.send(ExecutionFailed())
                        raise
                pool.apply_async(exec_analyzer, args=(nodes, sender))
            pool.close()
            pool.join()
            # All chunks done: tell the parent to stop polling.
            sender.send(ExecutionComplete())
        except Exception as e:
            self.logger.error(traceback.format_exc())
            self.logger.error(f"Execution of {name} failed with {e} {e.args}")
            sender.send(ExecutionFailed())
            raise
def parse_s3_event(s3: S3ServiceResource, event: S3PutRecordDict) -> bytes:
    """Extract (bucket, key) from an S3 put-event record and download the object.

    Raises KeyError (after logging the traceback) when the record lacks the
    expected ``s3.bucket.name`` / ``s3.object.key`` structure.
    """
    try:
        bucket = event["s3"]["bucket"]["name"]
        key = event["s3"]["object"]["key"]
    except KeyError:
        # Bug fix: the original message contained a stray "{}" str.format-style
        # placeholder that stdlib logging (%-style) never fills in.
        LOGGER.error("Could not parse s3 event", exc_info=True)
        raise
    return download_s3_file(s3, bucket, key)
def download_s3_file(s3: S3ServiceResource, bucket: str, key: str) -> bytes:
    """Fetch an S3 object's entire body as raw bytes."""
    body = s3.Object(bucket, key).get()["Body"]
    return cast(bytes, body.read())
def is_analyzer(analyzer_name: str, analyzer_cls: type) -> bool:
    """Return True if ``analyzer_cls`` looks like a concrete Analyzer class.

    The abstract base (named exactly "Analyzer") is excluded; anything else
    qualifies by duck-typing the get_queries/build/on_response interface.
    """
    if analyzer_name == "Analyzer":  # the base class itself is never executed
        return False
    required_attrs = ("get_queries", "build", "on_response")
    return all(hasattr(analyzer_cls, attr) for attr in required_attrs)
def get_analyzer_objects(dgraph_client: GraphClient) -> Dict[str, Analyzer]:
    """Instantiate every Analyzer class currently defined in this module.

    Relies on analyzer source files having been exec()'d into this module's
    globals beforehand (see AnalyzerExecutor.execute_file).
    """
    analyzers: Dict[str, Analyzer] = {}
    for cls_name, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
        if is_analyzer(cls_name, cls):
            analyzers[cls_name] = cls.build(dgraph_client)
    return analyzers
def chunker(seq: List[BaseView], size: int) -> List[List[BaseView]]:
    """Split ``seq`` into consecutive chunks of at most ``size`` elements."""
    chunks: List[List[BaseView]] = []
    for start in range(0, len(seq), size):
        chunks.append(seq[start : start + size])
    return chunks
def emit_event(
    analyzer_matched_subgraphs_bucket: str,
    s3: S3ServiceResource,
    event: ExecutionHit,
    metadata: Metadata,
) -> None:
    """Serialize an ExecutionHit and upload it to the matched-subgraphs bucket.

    The S3 key is the url-safe base64 of the payload's SHA-256 digest, so
    identical hits map to the same object (content-addressed write).
    """
    LOGGER.info(f"emitting event for: {event.analyzer_name, event.nodes}")
    payload = {
        "nodes": json.loads(event.nodes),
        "edges": json.loads(event.edges),
        "analyzer_name": event.analyzer_name,
        "risk_score": event.risk_score,
        "lenses": event.lenses,
        "risky_node_keys": event.risky_node_keys,
        "metadata": {"trace_id": str(metadata.trace_id)},
    }
    serialized = json.dumps(payload)
    digest = hashlib.sha256(serialized.encode()).digest()
    s3_key = base64.urlsafe_b64encode(digest).decode("utf-8")
    s3.Object(analyzer_matched_subgraphs_bucket, s3_key).put(
        Body=serialized.encode("utf-8")
    )
| 34.276786 | 93 | 0.604259 | from __future__ import annotations
import base64
import hashlib
import inspect
import json
import os
import sys
import traceback
from collections import defaultdict
from logging import Logger
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
cast,
)
import boto3
import grapl_analyzerlib.counters
from analyzer_executor_lib.redis_cache import EitherCache, construct_redis_client
from grapl_analyzerlib.analyzer import Analyzer
from grapl_analyzerlib.execution import ExecutionComplete, ExecutionFailed, ExecutionHit
from grapl_analyzerlib.grapl_client import GraphClient
from grapl_analyzerlib.nodes.base import BaseView
from grapl_analyzerlib.plugin_retriever import load_plugins
from grapl_analyzerlib.queryable import Queryable
from grapl_analyzerlib.subgraph_view import SubgraphView
from grapl_common.env_helpers import S3ResourceFactory
from grapl_common.grapl_logger import get_module_grapl_logger
from grapl_common.metrics.metric_reporter import MetricReporter, TagPair
from grapl_common.sqs.sqs_types import S3PutRecordDict, SQSMessageBody
from python_proto.pipeline import Metadata, OldEnvelope
if TYPE_CHECKING:
from mypy_boto3_s3 import S3ServiceResource
LOGGER = get_module_grapl_logger()
MODEL_PLUGINS_DIR = os.getenv("MODEL_PLUGINS_DIR", "/tmp")
sys.path.insert(0, MODEL_PLUGINS_DIR)
try:
directory = Path(MODEL_PLUGINS_DIR + "/model_plugins/")
directory.mkdir(parents=True, exist_ok=True)
except Exception as e:
LOGGER.error("Failed to create model plugins directory", e)
def verbose_cast_to_int(input: Optional[str]) -> Optional[int]:
if not input:
return None
try:
return int(input)
except (TypeError, ValueError):
raise ValueError(f"Couldn't cast this env variable into an int: {input}")
class AnalyzerExecutor:
# constants
CHUNK_SIZE_RETRY: int = 10
CHUNK_SIZE_DEFAULT: int = 100
def __init__(
self,
model_plugins_bucket: str,
analyzers_bucket: str,
analyzer_matched_subgraphs_bucket: str,
message_cache: EitherCache,
hit_cache: EitherCache,
chunk_size: int,
logger: Logger,
metric_reporter: MetricReporter,
) -> None:
self.model_plugins_bucket = model_plugins_bucket
self.analyzers_bucket = analyzers_bucket
self.analyzer_matched_subgraphs_bucket = analyzer_matched_subgraphs_bucket
self.message_cache = message_cache
self.hit_cache = hit_cache
self.chunk_size = chunk_size
self.logger = logger
self.metric_reporter = metric_reporter
@classmethod
def from_env(cls, env: Optional[Mapping[str, str]] = None) -> AnalyzerExecutor:
env = env or os.environ
# If we're retrying, change the chunk size
is_retry = bool(env.get("IS_RETRY", False))
if is_retry:
chunk_size = cls.CHUNK_SIZE_RETRY
else:
chunk_size = cls.CHUNK_SIZE_DEFAULT
messagecache_addr = env.get("MESSAGECACHE_ADDR")
messagecache_port: Optional[int] = verbose_cast_to_int(
env.get("MESSAGECACHE_PORT")
)
message_cache = construct_redis_client(messagecache_addr, messagecache_port)
hitcache_addr = env.get("HITCACHE_ADDR")
hitcache_port: Optional[int] = verbose_cast_to_int(env.get("HITCACHE_PORT"))
hit_cache = construct_redis_client(hitcache_addr, hitcache_port)
metric_reporter = MetricReporter.create("analyzer-executor")
model_plugins_bucket = env["GRAPL_MODEL_PLUGINS_BUCKET"]
analyzers_bucket = env["GRAPL_ANALYZERS_BUCKET"]
analyzer_matched_subgraphs_bucket = env[
"GRAPL_ANALYZER_MATCHED_SUBGRAPHS_BUCKET"
]
return AnalyzerExecutor(
model_plugins_bucket=model_plugins_bucket,
analyzers_bucket=analyzers_bucket,
analyzer_matched_subgraphs_bucket=analyzer_matched_subgraphs_bucket,
message_cache=message_cache,
hit_cache=hit_cache,
chunk_size=chunk_size,
logger=LOGGER,
metric_reporter=metric_reporter,
)
def check_caches(
self, file_hash: str, msg_id: str, node_key: str, analyzer_name: str
) -> bool:
with self.metric_reporter.histogram_ctx("analyzer-executor.check_caches"):
if self.check_msg_cache(file_hash, node_key, msg_id):
self.logger.debug("cache hit - already processed")
return True
if self.check_hit_cache(analyzer_name, node_key):
self.logger.debug("cache hit - already matched")
return True
return False
def to_event_hash(self, components: Iterable[str]) -> str:
joined = ",".join(components)
event_hash = hashlib.sha256(joined.encode()).hexdigest()
return event_hash
def check_msg_cache(self, file: str, node_key: str, msg_id: str) -> bool:
event_hash = self.to_event_hash((file, node_key, msg_id))
return bool(self.message_cache.get(event_hash))
def update_msg_cache(self, file: str, node_key: str, msg_id: str) -> None:
event_hash = self.to_event_hash((file, node_key, msg_id))
self.message_cache.set(event_hash, "1")
def check_hit_cache(self, file: str, node_key: str) -> bool:
event_hash = self.to_event_hash((file, node_key))
return bool(self.hit_cache.get(event_hash))
def update_hit_cache(self, file: str, node_key: str) -> None:
event_hash = self.to_event_hash((file, node_key))
self.hit_cache.set(event_hash, "1")
async def handle_events(self, events: SQSMessageBody, context: Any) -> None:
self.logger.debug(f"handling events: {events} context: {context}")
client = GraphClient()
s3 = S3ResourceFactory(boto3).from_env()
load_plugins(
self.model_plugins_bucket,
s3.meta.client,
os.path.abspath(MODEL_PLUGINS_DIR),
)
for event in events["Records"]:
data = parse_s3_event(s3, event)
envelope = OldEnvelope.deserialize(data)
message = json.loads(envelope.inner_message)
LOGGER.info(f'Executing Analyzer: {message["key"]}')
with self.metric_reporter.histogram_ctx(
"analyzer-executor.download_s3_file"
):
analyzer = download_s3_file(
s3,
self.analyzers_bucket,
message["key"],
).decode("utf8")
analyzer_name = message["key"].split("/")[-2]
subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))
LOGGER.info(f"event {event} {envelope.metadata}")
rx: Connection
tx: Connection
rx, tx = Pipe(duplex=False)
p = Process(
target=self.execute_file,
args=(analyzer_name, analyzer, subgraph, tx, "", self.chunk_size),
)
p.start()
for exec_hit in self.poll_process(rx=rx, analyzer_name=analyzer_name):
with self.metric_reporter.histogram_ctx(
"analyzer-executor.emit_event.ms",
(TagPair("analyzer_name", exec_hit.analyzer_name),),
):
emit_event(
self.analyzer_matched_subgraphs_bucket,
s3,
exec_hit,
envelope.metadata,
)
self.update_msg_cache(analyzer, exec_hit.root_node_key, message["key"])
self.update_hit_cache(analyzer_name, exec_hit.root_node_key)
p.join()
def poll_process(
self,
rx: Connection,
analyzer_name: str,
) -> Iterator[ExecutionHit]:
t = 0
while True:
p_res = rx.poll(timeout=5)
if not p_res:
t += 1
LOGGER.info(
f"Analyzer {analyzer_name} polled for for {t * 5} seconds without result"
)
continue
result: Optional[Any] = rx.recv()
if isinstance(result, ExecutionComplete):
self.logger.info(f"Analyzer {analyzer_name} execution complete")
return
if isinstance(result, ExecutionHit):
self.logger.info(
f"Analyzer {analyzer_name} emitting event for:"
f"{result.analyzer_name} {result.root_node_key}"
)
yield result
assert not isinstance(
result, ExecutionFailed
), f"Analyzer {analyzer_name} failed."
def exec_analyzers(
self,
dg_client: GraphClient,
file: str,
msg_id: str,
nodes: List[BaseView],
analyzers: Dict[str, Analyzer],
sender: Any,
) -> None:
if not analyzers:
self.logger.warning("Received empty dict of analyzers")
return
if not nodes:
self.logger.warning("Received empty array of nodes")
for node in nodes:
querymap: Dict[str, List[Queryable]] = defaultdict(list)
for an_name, analyzer in analyzers.items():
if self.check_caches(file, msg_id, node.node_key, an_name):
continue
queries = analyzer.get_queries()
if isinstance(queries, list) or isinstance(queries, tuple):
querymap[an_name].extend(queries)
else:
querymap[an_name].append(queries)
for an_name, queries in querymap.items():
analyzer = analyzers[an_name]
for query in queries:
tags = (TagPair("analyzer_name", an_name),)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.query_first.ms", tags
):
response = query.query_first(
dg_client, contains_node_key=node.node_key
)
if response:
self.logger.debug(
f"Analyzer '{an_name}' received a hit, executing on_response()"
)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.on_response.ms", tags
):
analyzer.on_response(response, sender)
def execute_file(
self,
name: str,
file: str,
graph: SubgraphView,
sender: Connection,
msg_id: str,
chunk_size: int,
) -> None:
try:
pool = ThreadPool(processes=4)
exec(file, globals())
client = GraphClient()
analyzers = get_analyzer_objects(client)
if not analyzers:
self.logger.warning(f"Got no analyzers for file: {name}")
self.logger.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")
for nodes in chunker([n for n in graph.node_iter()], chunk_size):
self.logger.info(f"Querying {len(nodes)} nodes")
def exec_analyzer(
nodes: List[BaseView], sender: Connection
) -> List[BaseView]:
try:
self.exec_analyzers(
client, file, msg_id, nodes, analyzers, sender
)
return nodes
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(
f"Execution of {name} failed with {e} {e.args}"
)
sender.send(ExecutionFailed())
raise
pool.apply_async(exec_analyzer, args=(nodes, sender))
pool.close()
pool.join()
sender.send(ExecutionComplete())
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(f"Execution of {name} failed with {e} {e.args}")
sender.send(ExecutionFailed())
raise
def parse_s3_event(s3: S3ServiceResource, event: S3PutRecordDict) -> bytes:
try:
bucket = event["s3"]["bucket"]["name"]
key = event["s3"]["object"]["key"]
except KeyError:
LOGGER.error("Could not parse s3 event: {}", exc_info=True)
raise
return download_s3_file(s3, bucket, key)
def download_s3_file(s3: S3ServiceResource, bucket: str, key: str) -> bytes:
obj = s3.Object(bucket, key)
return cast(bytes, obj.get()["Body"].read())
def is_analyzer(analyzer_name: str, analyzer_cls: type) -> bool:
if analyzer_name == "Analyzer":
return False
return (
hasattr(analyzer_cls, "get_queries")
and hasattr(analyzer_cls, "build")
and hasattr(analyzer_cls, "on_response")
)
def get_analyzer_objects(dgraph_client: GraphClient) -> Dict[str, Analyzer]:
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
return {
an[0]: an[1].build(dgraph_client)
for an in clsmembers
if is_analyzer(an[0], an[1])
}
def chunker(seq: List[BaseView], size: int) -> List[List[BaseView]]:
return [seq[pos : pos + size] for pos in range(0, len(seq), size)]
def emit_event(
analyzer_matched_subgraphs_bucket: str,
s3: S3ServiceResource,
event: ExecutionHit,
metadata: Metadata,
) -> None:
LOGGER.info(f"emitting event for: {event.analyzer_name, event.nodes}")
meta_dict = {
"trace_id": str(metadata.trace_id),
}
event_s = json.dumps(
{
"nodes": json.loads(event.nodes),
"edges": json.loads(event.edges),
"analyzer_name": event.analyzer_name,
"risk_score": event.risk_score,
"lenses": event.lenses,
"risky_node_keys": event.risky_node_keys,
"metadata": meta_dict,
}
)
event_hash = hashlib.sha256(event_s.encode())
key = base64.urlsafe_b64encode(event_hash.digest()).decode("utf-8")
obj = s3.Object(analyzer_matched_subgraphs_bucket, key)
obj.put(Body=event_s.encode("utf-8"))
| true | true |
f71ae3c9f30e5074c6179b84ba1638c42566b5fa | 6,730 | py | Python | randomdest.py | gbowerman/random-destination | 88f6a6a6e6cf971cb9d4bea477b093a4b0eef84e | [
"MIT"
] | null | null | null | randomdest.py | gbowerman/random-destination | 88f6a6a6e6cf971cb9d4bea477b093a4b0eef84e | [
"MIT"
] | null | null | null | randomdest.py | gbowerman/random-destination | 88f6a6a6e6cf971cb9d4bea477b093a4b0eef84e | [
"MIT"
] | null | null | null | """randomdest.py - dearpygui app to plot random destinations"""
import math
import os
import random
import requests
from dotenv import load_dotenv
from dearpygui.core import *
from dearpygui.simple import *
# globals/constants
EARTH_RADIUS = 6378.1
MAX_DIST = 16 # destination radius in KM
maps_key = ""
BASE_URL = "https://dev.virtualearth.net/REST/v1/Imagery/Map/AerialWithLabels/"
zoom = "18"
distance = MAX_DIST
img_size_x = 900
img_size_y = 900
img_file_name = "pic1.png"
def plot_location(latitude, longitude, bearing, distance):
"""Plot a new location based on starting point, bearing and distance"""
bearing_rad = math.radians(bearing)
lat1 = math.radians(latitude)
lon1 = math.radians(longitude)
d_over_r = distance / EARTH_RADIUS
lat2 = math.asin(
math.sin(lat1) * math.cos(d_over_r)
+ math.cos(lat1) * math.sin(d_over_r) * math.cos(bearing_rad)
)
lon2 = lon1 + math.atan2(
math.sin(bearing_rad) * math.sin(d_over_r) * math.cos(lat1),
math.cos(d_over_r) - math.sin(lat1) * math.sin(lat2),
)
lat2 = round(math.degrees(lat2), 6)
lon2 = round(math.degrees(lon2), 6)
return [lat2, lon2]
def get_random_location(latitude, longitude, radius_km):
"""Return coordinates for a random location based on starting point and radius"""
global distance # update distance as a global - it will be used to calculate route zoom
# get random destination and distance
bearing = round(random.uniform(0, 360),3 )
distance = round(random.uniform(0, radius_km), 3)
# set zoom based on distance
# print(f"Bearing: {str(bearing)}, Distance (km): {str(distance_km)}")
set_value("bearing_label", f"Bearing: {str(bearing)}°, Distance: {str(distance)} km")
# calculate the new latitude and longitude
return plot_location(latitude, longitude, bearing, distance)
def get_image(coords):
"""Get a new Bing maps image for specified coordinates and save it as a PNG file"""
pic_url = f"{BASE_URL}{coords}/{zoom}?mapSize={str(img_size_x)},{str(img_size_y)}&pp={coords};;1&dcl=1&key={maps_key}"
image_data = requests.get(pic_url).content
with open(img_file_name, "wb") as handler:
handler.write(image_data)
def get_route_image(coords1, coords2, zoom, midpoint):
"""Get a new Bing maps image for specified coordinates and save it as a PNG file"""
pic_url = f"{BASE_URL}{midpoint}/{zoom}/Routes/Driving?waypoint.1={coords1}&waypoint.2={coords2}&mapSize={str(img_size_x)},{str(img_size_y)}&imagerySet=AerialWithLabels&key={maps_key}"
image_data = requests.get(pic_url).content
with open(img_file_name, "wb") as handler:
handler.write(image_data)
def get_midpoint(coord1, coord2):
'''Get the midway point between 2 coordinates, input and output are strings'''
coord1_list = coord1.split(',')
coord2_list = coord2.split(',')
lat1 = float(coord1_list[0])
lon1 = float(coord1_list[1])
lat2 = float(coord2_list[0])
lon2 = float(coord2_list[1])
midlat = lat1 + (lat2 - lat1)/2
midlon = lon1 + (lon2 - lon1)/2
return f"{str(midlat)},{str(midlon)}"
def show_origin(sender, callback):
"""Get the coordinates from the UI and get the Bing maps image for those coords"""
coords = get_value("Coords")
get_image(coords)
# update canvas
clear_drawing("canvas")
draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def show_destination(sender, callback):
"""Display a map image for the destination coordinates"""
coords = get_value("destination_text")
if len(coords) < 3:
print("No destination") # to do: convert this message to a popup
return
get_image(coords)
# update canvas
clear_drawing("canvas")
draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def show_route(sender, callback):
"""Display a map image for the destination coordinates"""
coords1 = get_value("Coords")
coords2 = get_value("destination_text")
if len(coords2) < 3:
print("No destination") # to do: convert this message to a popup
return
midpoint = get_midpoint(coords1, coords2)
# zoom of route map will be proportional to distance to make it fit on canvas
if distance < 1.8:
route_zoom = 16
elif distance < 3:
route_zoom = 15
elif distance < 6.5:
route_zoom = 14
elif distance < 12:
route_zoom = 13
else:
route_zoom = 12
get_route_image(coords1, coords2, route_zoom, midpoint)
# update canvas
clear_drawing("canvas")
draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def get_random_destination(sender, callback):
"""Get new random destination based on starting coordinates"""
coords = get_value("Coords")
# calculate new coords and write then in the destination text box
coord_list = coords.split(",")
latitude = float(coord_list[0])
longitude = float(coord_list[1])
new_coords = get_random_location(latitude, longitude, MAX_DIST)
new_coords_str = f"{str(new_coords[0])},{str(new_coords[1])}"
set_value("destination_text", new_coords_str)
def main():
"""main routine to draw the UI and start the GUI"""
global maps_key
load_dotenv()
maps_key = os.environ.get("BING_MAPS_KEY")
coords = os.environ.get("DEFAULT_COORDS")
# set main window defaults
set_main_window_size(img_size_x + 20, img_size_y + 130)
set_main_window_pos(100, 25)
set_main_window_title("Random destination app")
with window("Main"):
add_text("Coordinates: ")
add_same_line()
add_input_text(
"Coords",
label="",
default_value=coords,
width=170,
callback=show_origin,
on_enter=True,
)
add_same_line()
add_button("Show origin", callback=show_origin)
add_text("Destination: ")
add_same_line()
add_input_text("destination_text", label="", width=170)
add_same_line()
add_button("Random destination", callback=get_random_destination)
add_same_line()
add_button("Show destination", callback=show_destination)
add_same_line()
add_button("Show route", callback=show_route)
add_text("bearing_label", default_value=" ")
add_spacing()
add_separator()
add_spacing()
add_drawing("canvas", width=img_size_x, height=img_size_y)
# if os.path.isfile(img_file_name):
# draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
start_dearpygui(primary_window="Main")
if __name__ == "__main__":
main()
| 32.990196 | 188 | 0.673254 | import math
import os
import random
import requests
from dotenv import load_dotenv
from dearpygui.core import *
from dearpygui.simple import *
EARTH_RADIUS = 6378.1
MAX_DIST = 16
maps_key = ""
BASE_URL = "https://dev.virtualearth.net/REST/v1/Imagery/Map/AerialWithLabels/"
zoom = "18"
distance = MAX_DIST
img_size_x = 900
img_size_y = 900
img_file_name = "pic1.png"
def plot_location(latitude, longitude, bearing, distance):
bearing_rad = math.radians(bearing)
lat1 = math.radians(latitude)
lon1 = math.radians(longitude)
d_over_r = distance / EARTH_RADIUS
lat2 = math.asin(
math.sin(lat1) * math.cos(d_over_r)
+ math.cos(lat1) * math.sin(d_over_r) * math.cos(bearing_rad)
)
lon2 = lon1 + math.atan2(
math.sin(bearing_rad) * math.sin(d_over_r) * math.cos(lat1),
math.cos(d_over_r) - math.sin(lat1) * math.sin(lat2),
)
lat2 = round(math.degrees(lat2), 6)
lon2 = round(math.degrees(lon2), 6)
return [lat2, lon2]
def get_random_location(latitude, longitude, radius_km):
global distance
bearing = round(random.uniform(0, 360),3 )
distance = round(random.uniform(0, radius_km), 3)
set_value("bearing_label", f"Bearing: {str(bearing)}°, Distance: {str(distance)} km")
return plot_location(latitude, longitude, bearing, distance)
def get_image(coords):
pic_url = f"{BASE_URL}{coords}/{zoom}?mapSize={str(img_size_x)},{str(img_size_y)}&pp={coords};;1&dcl=1&key={maps_key}"
image_data = requests.get(pic_url).content
with open(img_file_name, "wb") as handler:
handler.write(image_data)
def get_route_image(coords1, coords2, zoom, midpoint):
pic_url = f"{BASE_URL}{midpoint}/{zoom}/Routes/Driving?waypoint.1={coords1}&waypoint.2={coords2}&mapSize={str(img_size_x)},{str(img_size_y)}&imagerySet=AerialWithLabels&key={maps_key}"
image_data = requests.get(pic_url).content
with open(img_file_name, "wb") as handler:
handler.write(image_data)
def get_midpoint(coord1, coord2):
coord1_list = coord1.split(',')
coord2_list = coord2.split(',')
lat1 = float(coord1_list[0])
lon1 = float(coord1_list[1])
lat2 = float(coord2_list[0])
lon2 = float(coord2_list[1])
midlat = lat1 + (lat2 - lat1)/2
midlon = lon1 + (lon2 - lon1)/2
return f"{str(midlat)},{str(midlon)}"
def show_origin(sender, callback):
coords = get_value("Coords")
get_image(coords)
clear_drawing("canvas")
draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def show_destination(sender, callback):
coords = get_value("destination_text")
if len(coords) < 3:
print("No destination")
return
get_image(coords)
clear_drawing("canvas")
draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def show_route(sender, callback):
coords1 = get_value("Coords")
coords2 = get_value("destination_text")
if len(coords2) < 3:
print("No destination")
return
midpoint = get_midpoint(coords1, coords2)
if distance < 1.8:
route_zoom = 16
elif distance < 3:
route_zoom = 15
elif distance < 6.5:
route_zoom = 14
elif distance < 12:
route_zoom = 13
else:
route_zoom = 12
get_route_image(coords1, coords2, route_zoom, midpoint)
clear_drawing("canvas")
draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def get_random_destination(sender, callback):
coords = get_value("Coords")
coord_list = coords.split(",")
latitude = float(coord_list[0])
longitude = float(coord_list[1])
new_coords = get_random_location(latitude, longitude, MAX_DIST)
new_coords_str = f"{str(new_coords[0])},{str(new_coords[1])}"
set_value("destination_text", new_coords_str)
def main():
global maps_key
load_dotenv()
maps_key = os.environ.get("BING_MAPS_KEY")
coords = os.environ.get("DEFAULT_COORDS")
set_main_window_size(img_size_x + 20, img_size_y + 130)
set_main_window_pos(100, 25)
set_main_window_title("Random destination app")
with window("Main"):
add_text("Coordinates: ")
add_same_line()
add_input_text(
"Coords",
label="",
default_value=coords,
width=170,
callback=show_origin,
on_enter=True,
)
add_same_line()
add_button("Show origin", callback=show_origin)
add_text("Destination: ")
add_same_line()
add_input_text("destination_text", label="", width=170)
add_same_line()
add_button("Random destination", callback=get_random_destination)
add_same_line()
add_button("Show destination", callback=show_destination)
add_same_line()
add_button("Show route", callback=show_route)
add_text("bearing_label", default_value=" ")
add_spacing()
add_separator()
add_spacing()
add_drawing("canvas", width=img_size_x, height=img_size_y)
start_dearpygui(primary_window="Main")
if __name__ == "__main__":
main()
| true | true |
f71ae3f98733e595da1ffdd6566812f756e6d03b | 3,576 | py | Python | metadata-ingestion/src/datahub/ingestion/transformer/add_dataset_schema_tags.py | ShubhamThakre/datahub | 08a5fcfd017d4a2903a7b637f1e2129b9d7793ea | [
"Apache-2.0"
] | 1,603 | 2016-03-03T17:21:03.000Z | 2020-01-22T22:12:02.000Z | metadata-ingestion/src/datahub/ingestion/transformer/add_dataset_schema_tags.py | ShubhamThakre/datahub | 08a5fcfd017d4a2903a7b637f1e2129b9d7793ea | [
"Apache-2.0"
] | 1,157 | 2016-03-03T19:29:22.000Z | 2020-01-20T14:41:59.000Z | metadata-ingestion/src/datahub/ingestion/transformer/add_dataset_schema_tags.py | ShubhamThakre/datahub | 08a5fcfd017d4a2903a7b637f1e2129b9d7793ea | [
"Apache-2.0"
] | 570 | 2016-03-03T17:21:05.000Z | 2020-01-21T06:54:10.000Z | from typing import Callable, List, Optional, Union
import datahub.emitter.mce_builder as builder
from datahub.configuration.common import ConfigModel, KeyValuePattern
from datahub.configuration.import_resolver import pydantic_resolve_key
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.transformer.base_transformer import (
BaseTransformer,
SingleAspectTransformer,
)
from datahub.metadata.schema_classes import (
GlobalTagsClass,
SchemaFieldClass,
SchemaMetadataClass,
TagAssociationClass,
)
class AddDatasetSchemaTagsConfig(ConfigModel):
# Workaround for https://github.com/python/mypy/issues/708.
# Suggested by https://stackoverflow.com/a/64528725/5004662.
get_tags_to_add: Union[
Callable[[str], List[TagAssociationClass]],
Callable[[str], List[TagAssociationClass]],
]
_resolve_tag_fn = pydantic_resolve_key("get_tags_to_add")
class AddDatasetSchemaTags(BaseTransformer, SingleAspectTransformer):
"""Transformer that adds glossary tags to datasets according to a callback function."""
ctx: PipelineContext
config: AddDatasetSchemaTagsConfig
def __init__(self, config: AddDatasetSchemaTagsConfig, ctx: PipelineContext):
super().__init__()
self.ctx = ctx
self.config = config
def aspect_name(self) -> str:
return "schemaMetadata"
def entity_types(self) -> List[str]:
return ["dataset"]
@classmethod
def create(cls, config_dict: dict, ctx: PipelineContext) -> "AddDatasetSchemaTags":
config = AddDatasetSchemaTagsConfig.parse_obj(config_dict)
return cls(config, ctx)
def extend_field(self, schema_field: SchemaFieldClass) -> SchemaFieldClass:
tags_to_add = self.config.get_tags_to_add(schema_field.fieldPath)
if len(tags_to_add) > 0:
new_tags = (
schema_field.globalTags
if schema_field.globalTags is not None
else GlobalTagsClass(
tags=[],
)
)
new_tags.tags.extend(tags_to_add)
schema_field.globalTags = new_tags
return schema_field
def transform_aspect(
self, entity_urn: str, aspect_name: str, aspect: Optional[builder.Aspect]
) -> Optional[builder.Aspect]:
assert aspect is None or isinstance(aspect, SchemaMetadataClass)
if aspect is None:
return aspect
schema_metadata_aspect: SchemaMetadataClass = aspect
schema_metadata_aspect.fields = [
self.extend_field(field) for field in schema_metadata_aspect.fields
]
return schema_metadata_aspect # type: ignore
class PatternDatasetTagsConfig(ConfigModel):
tag_pattern: KeyValuePattern = KeyValuePattern.all()
class PatternAddDatasetSchemaTags(AddDatasetSchemaTags):
"""Transformer that adds a dynamic set of tags to each field in a dataset based on supplied patterns."""
def __init__(self, config: PatternDatasetTagsConfig, ctx: PipelineContext):
tag_pattern = config.tag_pattern
generic_config = AddDatasetSchemaTagsConfig(
get_tags_to_add=lambda path: [
TagAssociationClass(tag=urn) for urn in tag_pattern.value(path)
],
)
super().__init__(generic_config, ctx)
@classmethod
def create(
cls, config_dict: dict, ctx: PipelineContext
) -> "PatternAddDatasetSchemaTags":
config = PatternDatasetTagsConfig.parse_obj(config_dict)
return cls(config, ctx)
| 33.735849 | 108 | 0.699105 | from typing import Callable, List, Optional, Union
import datahub.emitter.mce_builder as builder
from datahub.configuration.common import ConfigModel, KeyValuePattern
from datahub.configuration.import_resolver import pydantic_resolve_key
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.transformer.base_transformer import (
BaseTransformer,
SingleAspectTransformer,
)
from datahub.metadata.schema_classes import (
GlobalTagsClass,
SchemaFieldClass,
SchemaMetadataClass,
TagAssociationClass,
)
class AddDatasetSchemaTagsConfig(ConfigModel):
get_tags_to_add: Union[
Callable[[str], List[TagAssociationClass]],
Callable[[str], List[TagAssociationClass]],
]
_resolve_tag_fn = pydantic_resolve_key("get_tags_to_add")
class AddDatasetSchemaTags(BaseTransformer, SingleAspectTransformer):
ctx: PipelineContext
config: AddDatasetSchemaTagsConfig
def __init__(self, config: AddDatasetSchemaTagsConfig, ctx: PipelineContext):
super().__init__()
self.ctx = ctx
self.config = config
def aspect_name(self) -> str:
return "schemaMetadata"
def entity_types(self) -> List[str]:
return ["dataset"]
@classmethod
def create(cls, config_dict: dict, ctx: PipelineContext) -> "AddDatasetSchemaTags":
config = AddDatasetSchemaTagsConfig.parse_obj(config_dict)
return cls(config, ctx)
def extend_field(self, schema_field: SchemaFieldClass) -> SchemaFieldClass:
tags_to_add = self.config.get_tags_to_add(schema_field.fieldPath)
if len(tags_to_add) > 0:
new_tags = (
schema_field.globalTags
if schema_field.globalTags is not None
else GlobalTagsClass(
tags=[],
)
)
new_tags.tags.extend(tags_to_add)
schema_field.globalTags = new_tags
return schema_field
def transform_aspect(
self, entity_urn: str, aspect_name: str, aspect: Optional[builder.Aspect]
) -> Optional[builder.Aspect]:
assert aspect is None or isinstance(aspect, SchemaMetadataClass)
if aspect is None:
return aspect
schema_metadata_aspect: SchemaMetadataClass = aspect
schema_metadata_aspect.fields = [
self.extend_field(field) for field in schema_metadata_aspect.fields
]
return schema_metadata_aspect
class PatternDatasetTagsConfig(ConfigModel):
tag_pattern: KeyValuePattern = KeyValuePattern.all()
class PatternAddDatasetSchemaTags(AddDatasetSchemaTags):
def __init__(self, config: PatternDatasetTagsConfig, ctx: PipelineContext):
tag_pattern = config.tag_pattern
generic_config = AddDatasetSchemaTagsConfig(
get_tags_to_add=lambda path: [
TagAssociationClass(tag=urn) for urn in tag_pattern.value(path)
],
)
super().__init__(generic_config, ctx)
@classmethod
def create(
cls, config_dict: dict, ctx: PipelineContext
) -> "PatternAddDatasetSchemaTags":
config = PatternDatasetTagsConfig.parse_obj(config_dict)
return cls(config, ctx)
| true | true |
f71ae4f35d601be11c164199f86811680f877aef | 8,965 | py | Python | teambition/api/works.py | jxtech/teambition-api | a15b845fa029d56c084fe134bd082ee8ba25d534 | [
"MIT"
] | 47 | 2015-06-18T15:26:39.000Z | 2022-02-22T08:01:58.000Z | teambition/api/works.py | messense/teambition-api | a15b845fa029d56c084fe134bd082ee8ba25d534 | [
"MIT"
] | 5 | 2015-07-07T11:09:36.000Z | 2020-02-17T08:38:22.000Z | teambition/api/works.py | jxtech/teambition-api | a15b845fa029d56c084fe134bd082ee8ba25d534 | [
"MIT"
] | 13 | 2015-06-18T10:07:04.000Z | 2021-09-22T03:36:05.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from optionaldict import optionaldict
from teambition.api.base import TeambitionAPI
class Works(TeambitionAPI):
def get(self, id=None, parent_id=None, page=None, count=None, all=None):
"""
获取文件信息
详情请参考
http://docs.teambition.com/wiki/works#works-get
:param id: 可选,文件 ID
:param parent_id: 可选,父级 ID
:param page: 可选,当前页,默认为 1
:param count: 可选,每页数量,默认为 30
:param all: 可选,若提供此参数则返回所有
:return: 返回的 JSON 数据包
"""
assert id or parent_id
params = optionaldict(
page=page,
count=count,
all=all
)
if id:
endpoint = 'api/works/{0}'.format(id)
elif parent_id:
endpoint = 'api/works'
params['_parentId'] = parent_id
return self._get(endpoint, params=params)
def create(self, parent_id, file_name, file_size, file_type, file_category,
file_key, image_width=None, image_height=None,
involve_members=None):
"""
新建文件
详情请参考
http://docs.teambition.com/wiki/works#works-create
:param parent_id: 所属目录 ID
:param file_name: 文件名
:param file_size: 文件大小
:param file_type: 文件类型
:param file_category: 文件类别
:param file_key: 使用 striker 服务上传后可得
:param image_width: 可选,图片宽度
:param image_height: 可选,图片高度
:param involve_members: 可选
:return: 返回的 JSON 数据包
"""
data = optionaldict(
_parentId=parent_id,
fileName=file_name,
fileSize=file_size,
fileType=file_type,
fileCategory=file_category,
fileKey=file_key,
imageWidth=image_width,
imageHeight=image_height,
involveMembers=involve_members
)
return self._post(
'api/works',
data=data
)
def like(self, id):
"""
赞文件
详情请参考
http://docs.teambition.com/wiki/works#works-like
:param id: 文件 ID
:return: 返回的 JSON 数据包
"""
return self._post('api/works/{0}/like'.format(id))
def update(self, id, file_name, description=None):
"""
更新文件
详情请参考
http://docs.teambition.com/wiki/works#works-update
:param id: 文件 ID
:param file_name: 文件名
:param description: 可选,描述
:return: 返回的 JSON 数据包
"""
data = optionaldict(
fileName=file_name,
description=description
)
return self._put(
'api/works/{0}'.format(id),
data=data
)
def move(self, id, parent_id):
"""
移动文件
详情请参考
http://docs.teambition.com/wiki/works#works-move
:param id: 文件 ID
:param parent_id: 新的目录 ID
:return: 返回的 JSON 数据包
"""
return self._put(
'api/works/{0}'.format(id),
data={
'_parentId': parent_id
}
)
def delete(self, id):
"""
删除文件
详情请参考
http://docs.teambition.com/wiki/works#works-delete
:param id: 文件 ID
:return: 返回的 JSON 数据包
"""
return self._delete('api/works/{0}'.format(id))
def update_members(self, id, members):
"""
更新文件参与者
详情请参考
http://docs.teambition.com/wiki/works#works-update-involvemembers
:param id: 文件 ID
:param members: 参与者 ID 列表
:return: 返回的 JSON 数据包
"""
return self._put(
'api/works/{0}/involveMembers'.format(id),
data={
'involveMembers': members
}
)
def get_tags(self, id):
"""
获取任务标签列表
:param id: 文件 ID
:return: 返回的 JSON 数据包
"""
return self._get('api/works/{0}/tags'.format(id))
def remove_tag(self, id, tag_id):
"""
移除标签
:param id: 文件 ID
:param name: 标签 ID
:return: 返回的 JSON 数据包
"""
return self._delete('api/works/{0}/tags/{1}'.format(id, tag_id))
def add_tag(self, id, tag_id):
"""
关联标签
:param id: 文件 ID
:param tag_id: 标签 ID
:return: 返回的 JSON 数据包
"""
return self._put('api/works/{0}/tags/{1}'.format(id, tag_id))
def get_objectlinks(self, id):
"""
获取文件关联的 objectlink 列表
:param id: 文件 ID
:return: 返回的 JSON 数据包
"""
return self._get('api/works/{0}/objectlinks'.format(id))
def create_objectlink(self, id, linked_id, linked_type):
"""
关联对象
:param id: 文件 ID
:param linked_id: 关联对象 ID
:param linked_type: 关联对象类型
:return: 返回的 JSON 数据包
"""
return self._post(
'api/objectlinks',
data={
'_parentId': id,
'parentType': 'work',
'_linkedId': linked_id,
'linkedType': linked_type
}
)
def get_versions(self, id):
"""
获取文件关联的历史版本信息
详情请参考
http://docs.teambition.com/wiki/works-versions#works-versions-list
:param id: 文件 ID
:return: 历史版本列表
"""
return self._get('api/works/{0}/versions'.format(id))
def get_version(self, id, version_id):
    """Fetch a single historical version of a file.

    See http://docs.teambition.com/wiki/works-versions#works-versions-get

    :param id: file ID
    :param version_id: version ID
    :return: version information
    """
    endpoint = 'api/works/{0}/versions/{1}'.format(id, version_id)
    return self._get(endpoint)
def update_version(self, id, version_id, file_name=None, description=None):
    """Update a single historical version of a file.

    See http://docs.teambition.com/wiki/works-versions#works-versions-update

    :param id: file ID
    :param version_id: version ID
    :param file_name: optional new file name
    :param description: optional new description
    :return: decoded JSON response
    """
    # optionaldict drops keys whose value is None, so only the supplied
    # fields are sent.
    payload = optionaldict(fileName=file_name, description=description)
    endpoint = 'api/works/{0}/versions/{1}'.format(id, version_id)
    return self._put(endpoint, data=payload)
def delete_version(self, id, version_id):
    """Delete a single historical version of a file.

    See http://docs.teambition.com/wiki/works-versions#works-versions-delete

    :param id: file ID
    :param version_id: version ID
    :return: decoded JSON response
    """
    endpoint = 'api/works/{0}/versions/{1}'.format(id, version_id)
    return self._delete(endpoint)
def create_version(self, id, file_name, file_size, file_type,
                   file_category, file_key, image_width=None,
                   image_height=None, involve_members=None):
    """Create a new historical version for a file.

    See http://docs.teambition.com/wiki/works-versions#works-versions-post

    :param id: file ID
    :param file_name: file name
    :param file_size: file size
    :param file_type: file type
    :param file_category: file category
    :param file_key: key obtained after uploading via the striker service
    :param image_width: optional image width
    :param image_height: optional image height
    :param involve_members: optional list of involved members
    :return: decoded JSON response
    """
    # optionaldict silently drops the None-valued optional fields.
    payload = optionaldict(
        fileName=file_name,
        fileSize=file_size,
        fileType=file_type,
        fileCategory=file_category,
        fileKey=file_key,
        imageWidth=image_width,
        imageHeight=image_height,
        involveMembers=involve_members
    )
    return self._post('api/works/{0}/versions'.format(id), data=payload)
def link_task(self, id, linked_id):
    """Link a task to this file.

    :param id: file ID
    :param linked_id: ID of the task to link
    :return: decoded JSON response
    """
    linked_type = 'task'
    return self.create_objectlink(id, linked_id, linked_type)
def link_post(self, id, linked_id):
    """Link a post (share) to this file.

    :param id: file ID
    :param linked_id: ID of the post to link
    :return: decoded JSON response
    """
    linked_type = 'post'
    return self.create_objectlink(id, linked_id, linked_type)
def link_event(self, id, linked_id):
    """Link a calendar event to this file.

    :param id: file ID
    :param linked_id: ID of the event to link
    :return: decoded JSON response
    """
    linked_type = 'event'
    return self.create_objectlink(id, linked_id, linked_type)
def link_work(self, id, linked_id):
    """Link another file to this file.

    :param id: file ID
    :param linked_id: ID of the file to link
    :return: decoded JSON response
    """
    linked_type = 'work'
    return self.create_objectlink(id, linked_id, linked_type)
def get_activities(self, id):
    """Fetch the activity feed bound to a file.

    :param id: file ID
    :return: decoded JSON response
    """
    params = {'_boundToObjectId': id}
    return self._get('api/activities', params=params)
| 25.112045 | 79 | 0.530508 |
from __future__ import absolute_import, unicode_literals
from optionaldict import optionaldict
from teambition.api.base import TeambitionAPI
class Works(TeambitionAPI):
def get(self, id=None, parent_id=None, page=None, count=None, all=None):
assert id or parent_id
params = optionaldict(
page=page,
count=count,
all=all
)
if id:
endpoint = 'api/works/{0}'.format(id)
elif parent_id:
endpoint = 'api/works'
params['_parentId'] = parent_id
return self._get(endpoint, params=params)
def create(self, parent_id, file_name, file_size, file_type, file_category,
file_key, image_width=None, image_height=None,
involve_members=None):
data = optionaldict(
_parentId=parent_id,
fileName=file_name,
fileSize=file_size,
fileType=file_type,
fileCategory=file_category,
fileKey=file_key,
imageWidth=image_width,
imageHeight=image_height,
involveMembers=involve_members
)
return self._post(
'api/works',
data=data
)
def like(self, id):
return self._post('api/works/{0}/like'.format(id))
def update(self, id, file_name, description=None):
data = optionaldict(
fileName=file_name,
description=description
)
return self._put(
'api/works/{0}'.format(id),
data=data
)
def move(self, id, parent_id):
return self._put(
'api/works/{0}'.format(id),
data={
'_parentId': parent_id
}
)
def delete(self, id):
return self._delete('api/works/{0}'.format(id))
def update_members(self, id, members):
return self._put(
'api/works/{0}/involveMembers'.format(id),
data={
'involveMembers': members
}
)
def get_tags(self, id):
return self._get('api/works/{0}/tags'.format(id))
def remove_tag(self, id, tag_id):
return self._delete('api/works/{0}/tags/{1}'.format(id, tag_id))
def add_tag(self, id, tag_id):
return self._put('api/works/{0}/tags/{1}'.format(id, tag_id))
def get_objectlinks(self, id):
return self._get('api/works/{0}/objectlinks'.format(id))
def create_objectlink(self, id, linked_id, linked_type):
return self._post(
'api/objectlinks',
data={
'_parentId': id,
'parentType': 'work',
'_linkedId': linked_id,
'linkedType': linked_type
}
)
def get_versions(self, id):
return self._get('api/works/{0}/versions'.format(id))
def get_version(self, id, version_id):
return self._get('api/works/{0}/versions/{1}'.format(id, version_id))
def update_version(self, id, version_id, file_name=None, description=None):
data = optionaldict(fileName=file_name, description=description)
return self._put(
'api/works/{0}/versions/{1}'.format(id, version_id),
data=data
)
def delete_version(self, id, version_id):
return self._delete(
'api/works/{0}/versions/{1}'.format(id, version_id)
)
def create_version(self, id, file_name, file_size, file_type,
file_category, file_key, image_width=None,
image_height=None, involve_members=None):
data = optionaldict(
fileName=file_name,
fileSize=file_size,
fileType=file_type,
fileCategory=file_category,
fileKey=file_key,
imageWidth=image_width,
imageHeight=image_height,
involveMembers=involve_members
)
return self._post(
'api/works/{0}/versions'.format(id),
data=data
)
def link_task(self, id, linked_id):
return self.create_objectlink(id, linked_id, 'task')
def link_post(self, id, linked_id):
return self.create_objectlink(id, linked_id, 'post')
def link_event(self, id, linked_id):
return self.create_objectlink(id, linked_id, 'event')
def link_work(self, id, linked_id):
return self.create_objectlink(id, linked_id, 'work')
def get_activities(self, id):
return self._get(
'api/activities',
params={'_boundToObjectId': id}
)
| true | true |
f71ae4fea4b81b1609ed48e653382c873a175421 | 1,514 | py | Python | kicad_to_digikey_bom.py | bradgrantham/circuit_cad_tools | 78521453849c3fcfdfa2a7d1916079283ad546c9 | [
"Apache-2.0"
] | 1 | 2022-03-13T19:44:24.000Z | 2022-03-13T19:44:24.000Z | kicad_to_digikey_bom.py | bradgrantham/circuit_cad_tools | 78521453849c3fcfdfa2a7d1916079283ad546c9 | [
"Apache-2.0"
] | null | null | null | kicad_to_digikey_bom.py | bradgrantham/circuit_cad_tools | 78521453849c3fcfdfa2a7d1916079283ad546c9 | [
"Apache-2.0"
] | 1 | 2020-10-29T11:10:44.000Z | 2020-10-29T11:10:44.000Z | import csv
import sys
bom = {}
reader = csv.reader(sys.stdin)
header = next(reader)
columns = {}
for (column_name, column_number) in zip(header, range(0, len(header))):
columns[column_name.strip()] = column_number
# Digikey ignores the headers and makes one map columns, so this
# is unnecessary and also annoying.
# print ",".join(("Digi-Key Part Number", "Manufacturer Name", "Manufacturer Part Number", "Customer Reference", "Quantity 1", "Quantity 2", "Quantity 3"))
for row in reader:
entry = {}
for (column_name, column_number) in columns.iteritems():
if column_number < len(row):
entry[column_name] = row[column_number].strip()
else:
entry[column_name] = ""
dist = entry.get('Distributor', '')
distpn = entry.get('Distributor PN', '')
mfg = entry.get('Manufacturer', '')
pn = entry.get('PN', '')
value = entry.get('Value', '')
if dist != 'Digikey':
print >>sys.stderr, "no digikey part number for reference %s, value %s footprint %s"% (entry['Reference'], entry['Value'], entry['Footprint'])
else:
bom.setdefault(dist + distpn + mfg + pn, []).append(entry)
for (ref, entries) in bom.iteritems():
dist = entries[0].get('Distributor', '')
distpn = entries[0].get('Distributor PN', '')
mfg = entries[0].get('Manufacturer', '')
pn = entries[0].get('PN', '')
refs = " ".join([ref['Reference'] for ref in entries])
print ",".join((distpn, mfg, pn, refs, str(len(entries)), "10", "100"))
| 36.926829 | 155 | 0.624174 | import csv
import sys
bom = {}
reader = csv.reader(sys.stdin)
header = next(reader)
columns = {}
for (column_name, column_number) in zip(header, range(0, len(header))):
columns[column_name.strip()] = column_number
for row in reader:
entry = {}
for (column_name, column_number) in columns.iteritems():
if column_number < len(row):
entry[column_name] = row[column_number].strip()
else:
entry[column_name] = ""
dist = entry.get('Distributor', '')
distpn = entry.get('Distributor PN', '')
mfg = entry.get('Manufacturer', '')
pn = entry.get('PN', '')
value = entry.get('Value', '')
if dist != 'Digikey':
print >>sys.stderr, "no digikey part number for reference %s, value %s footprint %s"% (entry['Reference'], entry['Value'], entry['Footprint'])
else:
bom.setdefault(dist + distpn + mfg + pn, []).append(entry)
for (ref, entries) in bom.iteritems():
dist = entries[0].get('Distributor', '')
distpn = entries[0].get('Distributor PN', '')
mfg = entries[0].get('Manufacturer', '')
pn = entries[0].get('PN', '')
refs = " ".join([ref['Reference'] for ref in entries])
print ",".join((distpn, mfg, pn, refs, str(len(entries)), "10", "100"))
| false | true |
f71ae63d4807be5163417cadc8694e164fd2ab08 | 145 | py | Python | example/list.py | LunaNode/lndynamic-ap | 7ffbc17742b7595b7e36cbef7213fc5dee18c50d | [
"MIT"
] | null | null | null | example/list.py | LunaNode/lndynamic-ap | 7ffbc17742b7595b7e36cbef7213fc5dee18c50d | [
"MIT"
] | 1 | 2016-05-15T20:10:52.000Z | 2016-05-15T20:10:52.000Z | example/list.py | LunaNode/lndynamic-ap | 7ffbc17742b7595b7e36cbef7213fc5dee18c50d | [
"MIT"
] | 12 | 2016-04-30T21:02:03.000Z | 2022-03-25T06:47:12.000Z | from lndynamic import LNDynamic
api_id = 'YOUR API ID'
api_key = 'YOUR API KEY'
api = LNDynamic(api_id, api_key)
print api.request('vm', 'list')
| 24.166667 | 32 | 0.731034 | from lndynamic import LNDynamic
api_id = 'YOUR API ID'
api_key = 'YOUR API KEY'
api = LNDynamic(api_id, api_key)
print api.request('vm', 'list')
| false | true |
f71ae8cf1533e7bd8ad8fa5b8cf9a24021f79424 | 623 | py | Python | example/your_app/app.py | keyloguer/flask_middleware | 38d01d7f87484f85aaeb7bb6deaa0f1055497c1a | [
"Apache-2.0"
] | 21 | 2019-10-09T18:50:57.000Z | 2020-10-14T20:49:57.000Z | example/your_app/app.py | keyloguer/flask_middleware | 38d01d7f87484f85aaeb7bb6deaa0f1055497c1a | [
"Apache-2.0"
] | null | null | null | example/your_app/app.py | keyloguer/flask_middleware | 38d01d7f87484f85aaeb7bb6deaa0f1055497c1a | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask_middleware_jwt import Middleware, middleware_jwt_required
app = Flask(__name__)
app.config['MIDDLEWARE_URL_IDENTITY'] = 'http://0.0.0.0:5000'
app.config['MIDDLEWARE_VERIFY_ENDPOINT'] = '/token/verify'
app.config['MIDDLEWARE_BEARER'] = True
app.config['MIDDLEWARE_VERIFY_HTTP_VERB'] = 'GET'
app.config['JWT_SECRET'] = 'super-secret'
app.config['JWT_ALGORITHMS'] = ['HS256']
middleware = Middleware(app)
@app.route("/")
@middleware_jwt_required
def hello_world():
return 'Hello World!'
if __name__ == '__main__':
app.run(port=5001) | 28.318182 | 68 | 0.693419 | from flask import Flask
from flask_middleware_jwt import Middleware, middleware_jwt_required
app = Flask(__name__)
app.config['MIDDLEWARE_URL_IDENTITY'] = 'http://0.0.0.0:5000'
app.config['MIDDLEWARE_VERIFY_ENDPOINT'] = '/token/verify'
app.config['MIDDLEWARE_BEARER'] = True
app.config['MIDDLEWARE_VERIFY_HTTP_VERB'] = 'GET'
app.config['JWT_SECRET'] = 'super-secret'
app.config['JWT_ALGORITHMS'] = ['HS256']
middleware = Middleware(app)
@app.route("/")
@middleware_jwt_required
def hello_world():
return 'Hello World!'
if __name__ == '__main__':
app.run(port=5001) | true | true |
f71ae8eb0aa5e13e7033134268ddae1c0ba1dd97 | 6,344 | py | Python | modules/pulse/pulse_sim.py | timsnow/PandABlocks-FPGA | 7df03a7a4415c5c9e02c80dc80c3d377ab480e5c | [
"Apache-2.0"
] | null | null | null | modules/pulse/pulse_sim.py | timsnow/PandABlocks-FPGA | 7df03a7a4415c5c9e02c80dc80c3d377ab480e5c | [
"Apache-2.0"
] | null | null | null | modules/pulse/pulse_sim.py | timsnow/PandABlocks-FPGA | 7df03a7a4415c5c9e02c80dc80c3d377ab480e5c | [
"Apache-2.0"
] | null | null | null | from common.python.simulations import BlockSimulation, properties_from_ini
from collections import deque
# max queue size
MAX_QUEUE = 1023
# min FPGA deadtime between queued pulses
MIN_QUEUE_DELTA = 4
# time taken to clear queue
QUEUE_CLEAR_TIME = 4
NAMES, PROPERTIES = properties_from_ini(__file__, "pulse.block.ini")
class PulseSimulation(BlockSimulation):
    """Simulation of the PULSE block.

    Queues delayed and/or width-stretched output pulses in response to
    edges on TRIG, tracking the queue depth (QUEUED) and the number of
    pulses dropped because the queue was full or too close together
    (DROPPED).
    """
    ENABLE, TRIG, DELAY_L, DELAY_H, WIDTH_L, WIDTH_H, TRIG_EDGE, OUT, QUEUED, \
        DROPPED = PROPERTIES

    def __init__(self):
        # Pending (timestamp, level) transitions for OUT.
        self.queue = deque()
        # Timestamp before which queued events are ignored (after a clear).
        self.valid_ts = 0
        self.trigtime = 0
        self.enqueue = 0
        self.dequeue = 0
        self.delaypulse = 0
        self.delayqueue = 1
        self.doqueue = 0
        self.missedsignal = 0
        self.width = 0
        self.delay = 0

    def do_pulse(self, ts, changes):
        """We've received a bit event on INP, so queue some output values
        based on DELAY and WIDTH"""
        # If the queue isn't valid at the moment then error
        # If there isn't room for 2 on the queue then error
        # If WIDTH is zero DELAY should be >3, or if DELAY is zero WIDTH
        # should be >3 for the FIFO to iterate fully
        width = self.width
        delay = self.delay
        if ts < self.valid_ts or len(self.queue) + 2 > MAX_QUEUE:
            self.DROPPED += 1
        # If there is no specified width then use the width of input pulse
        elif width == 0:
            self.queue.append((ts + delay, self.TRIG))
        elif self.TRIG and self.TRIG_EDGE == 0:
            self.generate_queue(ts, delay, width)
        elif not self.TRIG and self.TRIG_EDGE == 1 and delay == 0:
            self.generate_queue(ts + 1, delay, width)
        elif not self.TRIG and self.TRIG_EDGE == 1 and delay >= 0:
            self.generate_queue(ts, delay, width)
        elif self.TRIG and self.TRIG_EDGE == 2:
            self.generate_queue(ts, delay, width)
        elif not self.TRIG and self.TRIG_EDGE == 2:
            self.generate_queue(ts, delay + 1, width)

    def generate_queue(self, ts, delay, width):
        """Queue a rising edge at ts+delay and a falling edge width later."""
        start = ts + delay
        # make sure that start is after any pulse on queue
        if self.queue and start < self.queue[-1][0] + MIN_QUEUE_DELTA:
            self.DROPPED += 1
            self.missedsignal += 1
        else:
            self.queue.append((start, 1))
            self.queue.append((start + width, 0))

    def do_reset(self):
        """Reset the block, called on rising edge of ENABLE"""
        self.DROPPED = 0

    def do_clear_queue(self, ts):
        """Clear the queue, but not any errors"""
        self.valid_ts = ts + QUEUE_CLEAR_TIME
        self.OUT = 0
        self.queue.clear()

    def on_changes(self, ts, changes):
        """Handle changes at a particular timestamp, then return the timestamp
        when we next need to be called"""
        # This is a ConfigBlock object for use to get our strings from
        super(PulseSimulation, self).on_changes(ts, changes)
        # This is the next time we need to be called
        next_ts = ts + 1
        # If the DELAY and WIDTH inputs are out of bounds, set them to 4
        if 0 < self.DELAY_L < 4:
            self.delay = 4
        else:
            self.delay = self.DELAY_L
        if (0 < self.WIDTH_L < 4) and self.DELAY_L == 0:
            self.width = 4
        else:
            self.width = self.WIDTH_L
        # Append queue if the start of the queue is delayed
        if self.delaypulse == 1:
            if self.WIDTH_L > 0 or self.doqueue == 1:
                self.QUEUED += 1
                self.delaypulse = 0
                self.doqueue = 0
            elif changes.get(NAMES.TRIG, None) == 0:
                self.doqueue = 1
        # Increment the queue
        if self.enqueue == 1 and ts == self.trigtime + 1:
            if self.missedsignal > 0:
                self.missedsignal -= 1
            else:
                self.QUEUED += 1
                # Is a pulse of zero required before next pulse?
                if self.DELAY_L > 0:
                    self.delaypulse = 1
            self.enqueue = 0
        # On the trigger edge set the writestrobe to the queue
        # If both DELAY and WIDTH are equal to 0, the module bypasses the queue
        if self.width == 0 and self.delay == 0:
            self.enqueue = 0
        elif changes.get(NAMES.TRIG) == 1 and self.TRIG_EDGE in (0, 2):
            # Positive edge
            self.trigtime = ts
            self.enqueue = 1
        elif changes.get(NAMES.TRIG) == 0 and self.TRIG_EDGE in (1, 2):
            # Negative edge
            self.trigtime = ts + 1
            self.enqueue = 1
        # Set attributes, and flag clear queue
        for name, value in changes.items():
            setattr(self, name, value)
            # BUGFIX: the tuple previously repeated DELAY_L/WIDTH_L, so a
            # change to the high words (DELAY_H/WIDTH_H) never cleared the
            # queue. List all four delay/width registers.
            if name in ("DELAY_L", "DELAY_H", "WIDTH_L", "WIDTH_H"):
                self.do_clear_queue(ts)
        # On rising edge of enable clear errors
        if changes.get(NAMES.ENABLE, None) == 1:
            self.do_reset()
        # on falling edge of enable reset output and queue
        elif changes.get(NAMES.ENABLE, None) == 0:
            self.do_clear_queue(ts)
        # If we got an input and we were enabled then output a pulse
        if NAMES.TRIG in changes and self.ENABLE:
            self.do_pulse(ts, changes)
        # if we have anything else on the queue return when it's due
        if self.queue:
            # if the pulse on our queue is ready to be produced then produce
            if self.queue[0][0] == ts:
                if self.queue.popleft()[1] == 1:
                    self.OUT = 1
                    self.dequeue = 1
                else:
                    self.OUT = 0
            assert next_ts >= ts, "Going back in time %s >= %s" % (next_ts, ts)
        # At the end of the pulse, the queue count has decreased
        if self.OUT == 0 and self.dequeue == 1:
            if self.QUEUED > 0:
                self.QUEUED -= 1
            self.dequeue = 0
            self.delayqueue = 1
        # Decrease the queue count for the zero pulse
        if self.OUT == 1 and self.delayqueue == 1:
            if self.QUEUED > 0:
                self.QUEUED -= 1
            self.delayqueue = 0
        return next_ts
| 36.45977 | 79 | 0.565889 | from common.python.simulations import BlockSimulation, properties_from_ini
from collections import deque
MAX_QUEUE = 1023
MIN_QUEUE_DELTA = 4
QUEUE_CLEAR_TIME = 4
NAMES, PROPERTIES = properties_from_ini(__file__, "pulse.block.ini")
class PulseSimulation(BlockSimulation):
ENABLE, TRIG, DELAY_L, DELAY_H, WIDTH_L, WIDTH_H, TRIG_EDGE, OUT, QUEUED, \
DROPPED = PROPERTIES
def __init__(self):
self.queue = deque()
self.valid_ts = 0
self.trigtime = 0
self.enqueue = 0
self.dequeue = 0
self.delaypulse = 0
self.delayqueue = 1
self.doqueue = 0
self.missedsignal = 0
self.width = 0
self.delay = 0
def do_pulse(self, ts, changes):
# If there isn't room for 2 on the queue then error
width = self.width
delay = self.delay
if ts < self.valid_ts or len(self.queue) + 2 > MAX_QUEUE:
self.DROPPED += 1
elif width == 0:
self.queue.append((ts + delay, self.TRIG))
elif self.TRIG and self.TRIG_EDGE == 0:
self.generate_queue(ts, delay, width)
elif not self.TRIG and self.TRIG_EDGE == 1 and delay == 0:
self.generate_queue(ts+1, delay, width)
elif not self.TRIG and self.TRIG_EDGE == 1 and delay >= 0:
self.generate_queue(ts, delay, width)
elif self.TRIG and self.TRIG_EDGE == 2:
self.generate_queue(ts, delay, width)
elif not self.TRIG and self.TRIG_EDGE == 2:
self.generate_queue(ts, delay+1, width)
def generate_queue(self, ts, delay, width):
start = ts + delay
if self.queue and start < self.queue[-1][0] + MIN_QUEUE_DELTA:
self.DROPPED += 1
self.missedsignal += 1
else:
self.queue.append((start, 1))
self.queue.append((start + width, 0))
def do_reset(self):
self.DROPPED = 0
def do_clear_queue(self, ts):
self.valid_ts = ts + QUEUE_CLEAR_TIME
self.OUT = 0
self.queue.clear()
def on_changes(self, ts, changes):
super(PulseSimulation, self).on_changes(ts, changes)
next_ts = ts+1
if 0 < self.DELAY_L < 4:
self.delay = 4
else:
self.delay = self.DELAY_L
if (0 < self.WIDTH_L < 4) and self.DELAY_L == 0:
self.width = 4
else:
self.width = self.WIDTH_L
if self.delaypulse == 1:
if self.WIDTH_L > 0 or self.doqueue == 1:
self.QUEUED += 1
self.delaypulse = 0
self.doqueue = 0
elif changes.get(NAMES.TRIG, None) == 0:
self.doqueue = 1
if self.enqueue == 1 and ts == self.trigtime+1:
if self.missedsignal > 0:
self.missedsignal -= 1
else:
self.QUEUED += 1
if self.DELAY_L > 0:
self.delaypulse = 1
self.enqueue = 0
if self.width == 0 and self.delay == 0:
self.enqueue = 0
elif changes.get(NAMES.TRIG) == 1 and self.TRIG_EDGE in (0, 2):
self.trigtime = ts
self.enqueue = 1
elif changes.get(NAMES.TRIG) == 0 and self.TRIG_EDGE in (1, 2):
self.trigtime = ts + 1
self.enqueue = 1
for name, value in changes.items():
setattr(self, name, value)
if name in ("DELAY_L", "DELAY_L", "WIDTH_L", "WIDTH_L"):
self.do_clear_queue(ts)
if changes.get(NAMES.ENABLE, None) == 1:
self.do_reset()
elif changes.get(NAMES.ENABLE, None) == 0:
self.do_clear_queue(ts)
if NAMES.TRIG in changes and self.ENABLE:
self.do_pulse(ts, changes)
if self.queue:
# next_ts = self.queue[0][0]
# if the pulse on our queue is ready to be produced then produce
if self.queue[0][0] == ts:
if self.queue.popleft()[1] == 1:
self.OUT = 1
self.dequeue = 1
else:
self.OUT = 0
assert next_ts >= ts, "Going back in time %s >= %s" % (next_ts, ts)
# At the end of the pulse, the queue count has decreased
if self.OUT == 0 and self.dequeue == 1:
if self.QUEUED > 0:
self.QUEUED -= 1
self.dequeue = 0
self.delayqueue = 1
# Decrease the queue count for the zero pulse
if self.OUT == 1 and self.delayqueue == 1:
if self.QUEUED > 0:
self.QUEUED -= 1
self.delayqueue = 0
return next_ts
| true | true |
f71ae95e60037316cd6d37acc29a87db6cdf90d0 | 25,177 | py | Python | Lib/fontTools/ttLib/tables/E_B_L_C_.py | ViktorRubenko/fonttools | 522c32547c569d655feafd475987284bc0dabed1 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-05-07T16:29:02.000Z | 2020-05-07T16:29:02.000Z | Lib/fontTools/ttLib/tables/E_B_L_C_.py | ViktorRubenko/fonttools | 522c32547c569d655feafd475987284bc0dabed1 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | Lib/fontTools/ttLib/tables/E_B_L_C_.py | ViktorRubenko/fonttools | 522c32547c569d655feafd475987284bc0dabed1 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import safeEval
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
import struct
import itertools
from collections import deque
import logging
log = logging.getLogger(__name__)
eblcHeaderFormat = """
> # big endian
version: 16.16F
numSizes: I
"""
# The table format string is split to handle sbitLineMetrics simply.
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
# The compound type for hori and vert.
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
# hori and vert go between the two parts.
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)
indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
class table_E_B_L_C_(DefaultTable.DefaultTable):
    """Embedded Bitmap Location table: locates the bitmap glyph data stored
    in the companion EBDT table, organised per strike (per bitmap size)."""

    # Bitmap locations are meaningless without the bitmap data itself.
    dependencies = ['EBDT']

    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for coverting a font file to an alternative format.
    def getIndexFormatClass(self, indexFormat):
        return eblc_sub_table_classes[indexFormat]

    def decompile(self, data, ttFont):
        """Parse the binary table: header, one BitmapSizeTable per strike,
        then each strike's indexSubTableArray and index subtables."""
        # Save the original data because offsets are from the start of the table.
        origData = data
        i = 0;
        dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
        i += 8;
        self.strikes = []
        for curStrikeIndex in range(self.numSizes):
            curStrike = Strike()
            self.strikes.append(curStrike)
            curTable = curStrike.bitmapSizeTable
            dummy = sstruct.unpack2(bitmapSizeTableFormatPart1, data[i:i+16], curTable)
            i += 16
            # The hori/vert sbitLineMetrics records sit between the two
            # parts of the bitmapSizeTable format.
            for metric in ('hori', 'vert'):
                metricObj = SbitLineMetrics()
                vars(curTable)[metric] = metricObj
                dummy = sstruct.unpack2(sbitLineMetricsFormat, data[i:i+12], metricObj)
                i += 12
            dummy = sstruct.unpack(bitmapSizeTableFormatPart2, data[i:i+8], curTable)
            i += 8

        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            for subtableIndex in range(curTable.numberOfIndexSubTables):
                # Read the (first, last, offset) triple from the
                # indexSubTableArray for this strike.
                i = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
                tup = struct.unpack(indexSubTableArrayFormat, data[i:i+indexSubTableArraySize])
                (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
                # The subtable header lives at the array offset plus the
                # per-entry additional offset.
                i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
                tup = struct.unpack(indexSubHeaderFormat, data[i:i+indexSubHeaderSize])
                (indexFormat, imageFormat, imageDataOffset) = tup
                indexFormatClass = self.getIndexFormatClass(indexFormat)
                indexSubTable = indexFormatClass(data[i+indexSubHeaderSize:], ttFont)
                indexSubTable.firstGlyphIndex = firstGlyphIndex
                indexSubTable.lastGlyphIndex = lastGlyphIndex
                indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
                indexSubTable.indexFormat = indexFormat
                indexSubTable.imageFormat = imageFormat
                indexSubTable.imageDataOffset = imageDataOffset
                indexSubTable.decompile() # https://github.com/fonttools/fonttools/issues/317
                curStrike.indexSubTables.append(indexSubTable)

    def compile(self, ttFont):
        """Serialize the table, recomputing all offsets, sizes and glyph
        index ranges from the current strikes."""

        dataList = []
        self.numSizes = len(self.strikes)
        dataList.append(sstruct.pack(eblcHeaderFormat, self))

        # Data size of the header + bitmapSizeTable needs to be calculated
        # in order to form offsets. This value will hold the size of the data
        # in dataList after all the data is consolidated in dataList.
        dataSize = len(dataList[0])

        # The table will be structured in the following order:
        # (0) header
        # (1) Each bitmapSizeTable [1 ... self.numSizes]
        # (2) Alternate between indexSubTableArray and indexSubTable
        #     for each bitmapSizeTable present.
        #
        # The issue is maintaining the proper offsets when table information
        # gets moved around. All offsets and size information must be recalculated
        # when building the table to allow editing within ttLib and also allow easy
        # import/export to and from XML. All of this offset information is lost
        # when exporting to XML so everything must be calculated fresh so importing
        # from XML will work cleanly. Only byte offset and size information is
        # calculated fresh. Count information like numberOfIndexSubTables is
        # checked through assertions. If the information in this table was not
        # touched or was changed properly then these types of values should match.
        #
        # The table will be rebuilt the following way:
        # (0) Precompute the size of all the bitmapSizeTables. This is needed to
        #     compute the offsets properly.
        # (1) For each bitmapSizeTable compute the indexSubTable and
        #     indexSubTableArray pair. The indexSubTable must be computed first
        #     so that the offset information in indexSubTableArray can be
        #     calculated. Update the data size after each pairing.
        # (2) Build each bitmapSizeTable.
        # (3) Consolidate all the data into the main dataList in the correct order.

        for curStrike in self.strikes:
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
            dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)

        indexSubTablePairDataList = []
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
            curTable.indexSubTableArrayOffset = dataSize

            # Precompute the size of the indexSubTableArray. This information
            # is important for correctly calculating the new value for
            # additionalOffsetToIndexSubtable.
            sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
            lowerBound = dataSize
            dataSize += sizeOfSubTableArray
            upperBound = dataSize

            indexSubTableDataList = []
            for indexSubTable in curStrike.indexSubTables:
                indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
                # Glyph ranges are recomputed from the glyph names so that
                # edits made via XML round-trip correctly.
                glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
                indexSubTable.firstGlyphIndex = min(glyphIds)
                indexSubTable.lastGlyphIndex = max(glyphIds)
                data = indexSubTable.compile(ttFont)
                indexSubTableDataList.append(data)
                dataSize += len(data)
            curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
            curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)

            for i in curStrike.indexSubTables:
                data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
                indexSubTablePairDataList.append(data)
            indexSubTablePairDataList.extend(indexSubTableDataList)
            curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset

        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
            dataList.append(data)
            for metric in ('hori', 'vert'):
                metricObj = vars(curTable)[metric]
                data = sstruct.pack(sbitLineMetricsFormat, metricObj)
                dataList.append(data)
            data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
            dataList.append(data)
        dataList.extend(indexSubTablePairDataList)

        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        """Write the header version and every strike as XML."""
        writer.simpletag('header', [('version', self.version)])
        writer.newline()
        for curIndex, curStrike in enumerate(self.strikes):
            curStrike.toXML(curIndex, writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from XML; strike elements may appear in any
        order thanks to their explicit 'index' attribute."""
        if name == 'header':
            self.version = safeEval(attrs['version'])
        elif name == 'strike':
            if not hasattr(self, 'strikes'):
                self.strikes = []
            strikeIndex = safeEval(attrs['index'])
            curStrike = Strike()
            curStrike.fromXML(name, attrs, content, ttFont, self)

            # Grow the strike array to the appropriate size. The XML format
            # allows for the strike index value to be out of order.
            if strikeIndex >= len(self.strikes):
                self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
            assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
            self.strikes[strikeIndex] = curStrike
class Strike(object):
    """One bitmap strike: a BitmapSizeTable plus the index subtables that
    locate its glyph data."""

    def __init__(self):
        self.bitmapSizeTable = BitmapSizeTable()
        self.indexSubTables = []

    def toXML(self, strikeIndex, writer, ttFont):
        """Write this strike, its size table and all index subtables as XML."""
        writer.begintag('strike', [('index', strikeIndex)])
        writer.newline()
        self.bitmapSizeTable.toXML(writer, ttFont)
        writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
        writer.newline()
        for subTable in self.indexSubTables:
            subTable.toXML(writer, ttFont)
        writer.endtag('strike')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, locator):
        """Rebuild the strike from XML; ``locator`` maps index-format numbers
        to their subtable classes."""
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == 'bitmapSizeTable':
                self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
            elif name.startswith(_indexSubTableSubclassPrefix):
                # The numeric index format is encoded in the element name.
                indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):])
                subTableClass = locator.getIndexFormatClass(indexFormat)
                subTable = subTableClass(None, None)
                subTable.indexFormat = indexFormat
                subTable.fromXML(name, attrs, content, ttFont)
                self.indexSubTables.append(subTable)
class BitmapSizeTable(object):
    """Per-strike metrics and bookkeeping fields of the EBLC table."""

    def _getXMLMetricNames(self):
        # All simple metric names worth round-tripping through XML.
        names = list(sstruct.getformat(bitmapSizeTableFormatPart1)[1])
        names += sstruct.getformat(bitmapSizeTableFormatPart2)[1]
        # The first three entries are byte offsets/counts that the compiler
        # recomputes, so they are not exposed in XML.
        return names[3:]

    def toXML(self, writer, ttFont):
        writer.begintag('bitmapSizeTable')
        writer.newline()
        for direction in ('hori', 'vert'):
            getattr(self, direction).toXML(direction, writer, ttFont)
        for metricName in self._getXMLMetricNames():
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag('bitmapSizeTable')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Only element names in this set are read back from XML.
        dataNames = set(self._getXMLMetricNames())
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == 'sbitLineMetrics':
                direction = attrs['direction']
                assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
                metricObj = SbitLineMetrics()
                metricObj.fromXML(name, attrs, content, ttFont)
                vars(self)[direction] = metricObj
            elif name in dataNames:
                vars(self)[name] = safeEval(attrs['value'])
            else:
                log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
class SbitLineMetrics(object):
    """One sbitLineMetrics record (the 'hori' or 'vert' direction)."""

    def toXML(self, name, writer, ttFont):
        writer.begintag('sbitLineMetrics', [('direction', name)])
        writer.newline()
        metricNames = sstruct.getformat(sbitLineMetricsFormat)[1]
        for metricName in metricNames:
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag('sbitLineMetrics')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        validNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name in validNames:
                vars(self)[name] = safeEval(attrs['value'])
# Important information about the naming scheme. Used for identifying subtables.
_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
class EblcIndexSubTable(object):
def __init__(self, data, ttFont):
self.data = data
self.ttFont = ttFont
# TODO Currently non-lazy decompiling doesn't work for this class...
#if not ttFont.lazy:
# self.decompile()
# del self.data, self.ttFont
def __getattr__(self, attr):
# Allow lazy decompile.
if attr[:2] == '__':
raise AttributeError(attr)
if not hasattr(self, "data"):
raise AttributeError(attr)
self.decompile()
return getattr(self, attr)
# This method just takes care of the indexSubHeader. Implementing subclasses
# should call it to compile the indexSubHeader and then continue compiling
# the remainder of their unique format.
def compile(self, ttFont):
return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)
# Creates the XML for bitmap glyphs. Each index sub table basically makes
# the same XML except for specific metric information that is written
# out via a method call that a subclass implements optionally.
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
('imageFormat', self.imageFormat),
('firstGlyphIndex', self.firstGlyphIndex),
('lastGlyphIndex', self.lastGlyphIndex),
])
writer.newline()
self.writeMetrics(writer, ttFont)
# Write out the names as thats all thats needed to rebuild etc.
# For font debugging of consecutive formats the ids are also written.
# The ids are not read when moving from the XML format.
glyphIds = map(ttFont.getGlyphID, self.names)
for glyphName, glyphId in zip(self.names, glyphIds):
writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
# Read all the attributes. Even though the glyph indices are
# recalculated, they are still read in case there needs to
# be an immediate export of the data.
self.imageFormat = safeEval(attrs['imageFormat'])
self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])
self.readMetrics(name, attrs, content, ttFont)
self.names = []
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'glyphLoc':
self.names.append(attrs['name'])
# A helper method that writes the metrics for the index sub table. It also
# is responsible for writing the image size for fixed size data since fixed
# size is not recalculated on compile. Default behavior is to do nothing.
def writeMetrics(self, writer, ttFont):
pass
# A helper method that is the inverse of writeMetrics.
def readMetrics(self, name, attrs, content, ttFont):
pass
# This method is for fixed glyph data sizes. There are formats where
# the glyph data is fixed but are actually composite glyphs. To handle
# this the font spec in indexSubTable makes the data the size of the
# fixed size by padding the component arrays. This function abstracts
# out this padding process. Input is data unpadded. Output is data
# padded only in fixed formats. Default behavior is to return the data.
def padBitmapData(self, data):
return data
# Remove any of the glyph locations and names that are flagged as skipped.
# This only occurs in formats {1,3}.
def removeSkipGlyphs(self):
# Determines if a name, location pair is a valid data location.
# Skip glyphs are marked when the size is equal to zero.
def isValidLocation(args):
(name, (startByte, endByte)) = args
return startByte < endByte
# Remove all skip glyphs.
dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
self.names, self.locations = list(map(list, zip(*dataPairs)))
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
# Prep the data size for the offset array data format.
dataFormat = '>'+formatStringForDataType
offsetDataSize = struct.calcsize(dataFormat)
class OffsetArrayIndexSubTableMixin(object):
def decompile(self):
numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
self.removeSkipGlyphs()
del self.data, self.ttFont
def compile(self, ttFont):
# First make sure that all the data lines up properly. Formats 1 and 3
# must have all its data lined up consecutively. If not this will fail.
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure that all ids are sorted strictly increasing.
assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))
# Run a simple algorithm to add skip glyphs to the data locations at
# the places where an id is not present.
idQueue = deque(glyphIds)
locQueue = deque(self.locations)
allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
allLocations = []
for curId in allGlyphIds:
if curId != idQueue[0]:
allLocations.append((locQueue[0][0], locQueue[0][0]))
else:
idQueue.popleft()
allLocations.append(locQueue.popleft())
# Now that all the locations are collected, pack them appropriately into
# offsets. This is the form where offset[i] is the location and
# offset[i+1]-offset[i] is the size of the data location.
offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
# Image data offset must be less than or equal to the minimum of locations.
# This offset may change the value for round tripping but is safer and
# allows imageDataOffset to not be required to be in the XML version.
self.imageDataOffset = min(offsets)
offsetArray = [offset - self.imageDataOffset for offset in offsets]
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
# Take care of any padding issues. Only occurs in format 3.
if offsetDataSize * len(offsetArray) % 4 != 0:
dataList.append(struct.pack(dataFormat, 0))
return bytesjoin(dataList)
return OffsetArrayIndexSubTableMixin
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
def writeMetrics(self, writer, ttFont):
writer.simpletag('imageSize', value=self.imageSize)
writer.newline()
self.metrics.toXML(writer, ttFont)
def readMetrics(self, name, attrs, content, ttFont):
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'imageSize':
self.imageSize = safeEval(attrs['value'])
elif name == BigGlyphMetrics.__name__:
self.metrics = BigGlyphMetrics()
self.metrics.fromXML(name, attrs, content, ttFont)
elif name == SmallGlyphMetrics.__name__:
log.warning("SmallGlyphMetrics being ignored in format %d.", self.indexFormat)
def padBitmapData(self, data):
# Make sure that the data isn't bigger than the fixed size.
assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
# Pad the data so that it matches the fixed size.
pad = (self.imageSize - len(data)) * b'\0'
return data + pad
class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
pass
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
def decompile(self):
(self.imageSize,) = struct.unpack(">L", self.data[:4])
self.metrics = BigGlyphMetrics()
sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
del self.data, self.ttFont
def compile(self, ttFont):
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure all the ids are consecutive. This is required by Format 2.
assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
self.imageDataOffset = min(next(iter(zip(*self.locations))))
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", self.imageSize))
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
return bytesjoin(dataList)
class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
pass
class eblc_index_sub_table_4(EblcIndexSubTable):
def decompile(self):
(numGlyphs,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations]
glyphIds, offsets = list(map(list, zip(*glyphArray)))
# There are one too many glyph ids. Get rid of the last one.
glyphIds.pop()
offsets = [offset + self.imageDataOffset for offset in offsets]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
del self.data, self.ttFont
def compile(self, ttFont):
# First make sure that all the data lines up properly. Format 4
# must have all its data lined up consecutively. If not this will fail.
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4"
offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
# Image data offset must be less than or equal to the minimum of locations.
# Resetting this offset may change the value for round tripping but is safer
# and allows imageDataOffset to not be required to be in the XML version.
self.imageDataOffset = min(offsets)
offsets = [offset - self.imageDataOffset for offset in offsets]
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Create an iterator over the ids plus a padding value.
idsPlusPad = list(itertools.chain(glyphIds, [0]))
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", len(glyphIds)))
tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)]
dataList += tmp
data = bytesjoin(dataList)
return data
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
def decompile(self):
self.origDataLen = 0
(self.imageSize,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics())
(numGlyphs,) = struct.unpack(">L", data[:4])
data = data[4:]
glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)]
offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
del self.data, self.ttFont
def compile(self, ttFont):
self.imageDataOffset = min(next(iter(zip(*self.locations))))
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", self.imageSize))
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
glyphIds = list(map(ttFont.getGlyphID, self.names))
dataList.append(struct.pack(">L", len(glyphIds)))
dataList += [struct.pack(">H", curId) for curId in glyphIds]
if len(glyphIds) % 2 == 1:
dataList.append(struct.pack(">H", 0))
return bytesjoin(dataList)
# Dictionary of indexFormat to the class representing that format.
eblc_sub_table_classes = {
1: eblc_index_sub_table_1,
2: eblc_index_sub_table_2,
3: eblc_index_sub_table_3,
4: eblc_index_sub_table_4,
5: eblc_index_sub_table_5,
}
| 40.154705 | 139 | 0.743059 | from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import safeEval
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
import struct
import itertools
from collections import deque
import logging
log = logging.getLogger(__name__)
eblcHeaderFormat = """
> # big endian
version: 16.16F
numSizes: I
"""
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)
indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
class table_E_B_L_C_(DefaultTable.DefaultTable):
dependencies = ['EBDT']
def getIndexFormatClass(self, indexFormat):
return eblc_sub_table_classes[indexFormat]
def decompile(self, data, ttFont):
origData = data
i = 0;
dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
i += 8;
self.strikes = []
for curStrikeIndex in range(self.numSizes):
curStrike = Strike()
self.strikes.append(curStrike)
curTable = curStrike.bitmapSizeTable
dummy = sstruct.unpack2(bitmapSizeTableFormatPart1, data[i:i+16], curTable)
i += 16
for metric in ('hori', 'vert'):
metricObj = SbitLineMetrics()
vars(curTable)[metric] = metricObj
dummy = sstruct.unpack2(sbitLineMetricsFormat, data[i:i+12], metricObj)
i += 12
dummy = sstruct.unpack(bitmapSizeTableFormatPart2, data[i:i+8], curTable)
i += 8
for curStrike in self.strikes:
curTable = curStrike.bitmapSizeTable
for subtableIndex in range(curTable.numberOfIndexSubTables):
i = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
tup = struct.unpack(indexSubTableArrayFormat, data[i:i+indexSubTableArraySize])
(firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
tup = struct.unpack(indexSubHeaderFormat, data[i:i+indexSubHeaderSize])
(indexFormat, imageFormat, imageDataOffset) = tup
indexFormatClass = self.getIndexFormatClass(indexFormat)
indexSubTable = indexFormatClass(data[i+indexSubHeaderSize:], ttFont)
indexSubTable.firstGlyphIndex = firstGlyphIndex
indexSubTable.lastGlyphIndex = lastGlyphIndex
indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
indexSubTable.indexFormat = indexFormat
indexSubTable.imageFormat = imageFormat
indexSubTable.imageDataOffset = imageDataOffset
indexSubTable.decompile()
curStrike.indexSubTables.append(indexSubTable)
def compile(self, ttFont):
dataList = []
self.numSizes = len(self.strikes)
dataList.append(sstruct.pack(eblcHeaderFormat, self))
dataSize = len(dataList[0])
for curStrike in self.strikes:
dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
indexSubTablePairDataList = []
for curStrike in self.strikes:
curTable = curStrike.bitmapSizeTable
curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
curTable.indexSubTableArrayOffset = dataSize
sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
lowerBound = dataSize
dataSize += sizeOfSubTableArray
upperBound = dataSize
indexSubTableDataList = []
for indexSubTable in curStrike.indexSubTables:
indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
indexSubTable.firstGlyphIndex = min(glyphIds)
indexSubTable.lastGlyphIndex = max(glyphIds)
data = indexSubTable.compile(ttFont)
indexSubTableDataList.append(data)
dataSize += len(data)
curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)
for i in curStrike.indexSubTables:
data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
indexSubTablePairDataList.append(data)
indexSubTablePairDataList.extend(indexSubTableDataList)
curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
for curStrike in self.strikes:
curTable = curStrike.bitmapSizeTable
data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
dataList.append(data)
for metric in ('hori', 'vert'):
metricObj = vars(curTable)[metric]
data = sstruct.pack(sbitLineMetricsFormat, metricObj)
dataList.append(data)
data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
dataList.append(data)
dataList.extend(indexSubTablePairDataList)
return bytesjoin(dataList)
def toXML(self, writer, ttFont):
writer.simpletag('header', [('version', self.version)])
writer.newline()
for curIndex, curStrike in enumerate(self.strikes):
curStrike.toXML(curIndex, writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
if name == 'header':
self.version = safeEval(attrs['version'])
elif name == 'strike':
if not hasattr(self, 'strikes'):
self.strikes = []
strikeIndex = safeEval(attrs['index'])
curStrike = Strike()
curStrike.fromXML(name, attrs, content, ttFont, self)
if strikeIndex >= len(self.strikes):
self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
self.strikes[strikeIndex] = curStrike
class Strike(object):
def __init__(self):
self.bitmapSizeTable = BitmapSizeTable()
self.indexSubTables = []
def toXML(self, strikeIndex, writer, ttFont):
writer.begintag('strike', [('index', strikeIndex)])
writer.newline()
self.bitmapSizeTable.toXML(writer, ttFont)
writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
writer.newline()
for indexSubTable in self.indexSubTables:
indexSubTable.toXML(writer, ttFont)
writer.endtag('strike')
writer.newline()
def fromXML(self, name, attrs, content, ttFont, locator):
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'bitmapSizeTable':
self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
elif name.startswith(_indexSubTableSubclassPrefix):
indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):])
indexFormatClass = locator.getIndexFormatClass(indexFormat)
indexSubTable = indexFormatClass(None, None)
indexSubTable.indexFormat = indexFormat
indexSubTable.fromXML(name, attrs, content, ttFont)
self.indexSubTables.append(indexSubTable)
class BitmapSizeTable(object):
def _getXMLMetricNames(self):
dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
return dataNames[3:]
def toXML(self, writer, ttFont):
writer.begintag('bitmapSizeTable')
writer.newline()
for metric in ('hori', 'vert'):
getattr(self, metric).toXML(metric, writer, ttFont)
for metricName in self._getXMLMetricNames():
writer.simpletag(metricName, value=getattr(self, metricName))
writer.newline()
writer.endtag('bitmapSizeTable')
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
dataNames = set(self._getXMLMetricNames())
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'sbitLineMetrics':
direction = attrs['direction']
assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
metricObj = SbitLineMetrics()
metricObj.fromXML(name, attrs, content, ttFont)
vars(self)[direction] = metricObj
elif name in dataNames:
vars(self)[name] = safeEval(attrs['value'])
else:
log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
class SbitLineMetrics(object):
def toXML(self, name, writer, ttFont):
writer.begintag('sbitLineMetrics', [('direction', name)])
writer.newline()
for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
writer.simpletag(metricName, value=getattr(self, metricName))
writer.newline()
writer.endtag('sbitLineMetrics')
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name in metricNames:
vars(self)[name] = safeEval(attrs['value'])
_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
class EblcIndexSubTable(object):
def __init__(self, data, ttFont):
self.data = data
self.ttFont = ttFont
#if not ttFont.lazy:
# self.decompile()
# del self.data, self.ttFont
def __getattr__(self, attr):
# Allow lazy decompile.
if attr[:2] == '__':
raise AttributeError(attr)
if not hasattr(self, "data"):
raise AttributeError(attr)
self.decompile()
return getattr(self, attr)
# This method just takes care of the indexSubHeader. Implementing subclasses
# should call it to compile the indexSubHeader and then continue compiling
# the remainder of their unique format.
def compile(self, ttFont):
return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)
# Creates the XML for bitmap glyphs. Each index sub table basically makes
# the same XML except for specific metric information that is written
# out via a method call that a subclass implements optionally.
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
('imageFormat', self.imageFormat),
('firstGlyphIndex', self.firstGlyphIndex),
('lastGlyphIndex', self.lastGlyphIndex),
])
writer.newline()
self.writeMetrics(writer, ttFont)
# Write out the names as thats all thats needed to rebuild etc.
# For font debugging of consecutive formats the ids are also written.
# The ids are not read when moving from the XML format.
glyphIds = map(ttFont.getGlyphID, self.names)
for glyphName, glyphId in zip(self.names, glyphIds):
writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
# Read all the attributes. Even though the glyph indices are
# recalculated, they are still read in case there needs to
# be an immediate export of the data.
self.imageFormat = safeEval(attrs['imageFormat'])
self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])
self.readMetrics(name, attrs, content, ttFont)
self.names = []
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'glyphLoc':
self.names.append(attrs['name'])
# A helper method that writes the metrics for the index sub table. It also
# is responsible for writing the image size for fixed size data since fixed
# size is not recalculated on compile. Default behavior is to do nothing.
def writeMetrics(self, writer, ttFont):
pass
# A helper method that is the inverse of writeMetrics.
def readMetrics(self, name, attrs, content, ttFont):
pass
# This method is for fixed glyph data sizes. There are formats where
# the glyph data is fixed but are actually composite glyphs. To handle
# this the font spec in indexSubTable makes the data the size of the
# fixed size by padding the component arrays. This function abstracts
# out this padding process. Input is data unpadded. Output is data
# padded only in fixed formats. Default behavior is to return the data.
def padBitmapData(self, data):
return data
# Remove any of the glyph locations and names that are flagged as skipped.
# This only occurs in formats {1,3}.
def removeSkipGlyphs(self):
# Determines if a name, location pair is a valid data location.
# Skip glyphs are marked when the size is equal to zero.
def isValidLocation(args):
(name, (startByte, endByte)) = args
return startByte < endByte
# Remove all skip glyphs.
dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
self.names, self.locations = list(map(list, zip(*dataPairs)))
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
# Prep the data size for the offset array data format.
dataFormat = '>'+formatStringForDataType
offsetDataSize = struct.calcsize(dataFormat)
class OffsetArrayIndexSubTableMixin(object):
def decompile(self):
numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
self.removeSkipGlyphs()
del self.data, self.ttFont
def compile(self, ttFont):
# First make sure that all the data lines up properly. Formats 1 and 3
# must have all its data lined up consecutively. If not this will fail.
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure that all ids are sorted strictly increasing.
assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))
# Run a simple algorithm to add skip glyphs to the data locations at
# the places where an id is not present.
idQueue = deque(glyphIds)
locQueue = deque(self.locations)
allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
allLocations = []
for curId in allGlyphIds:
if curId != idQueue[0]:
allLocations.append((locQueue[0][0], locQueue[0][0]))
else:
idQueue.popleft()
allLocations.append(locQueue.popleft())
# Now that all the locations are collected, pack them appropriately into
# offsets. This is the form where offset[i] is the location and
# offset[i+1]-offset[i] is the size of the data location.
offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
# Image data offset must be less than or equal to the minimum of locations.
# This offset may change the value for round tripping but is safer and
# allows imageDataOffset to not be required to be in the XML version.
self.imageDataOffset = min(offsets)
offsetArray = [offset - self.imageDataOffset for offset in offsets]
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
# Take care of any padding issues. Only occurs in format 3.
if offsetDataSize * len(offsetArray) % 4 != 0:
dataList.append(struct.pack(dataFormat, 0))
return bytesjoin(dataList)
return OffsetArrayIndexSubTableMixin
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
def writeMetrics(self, writer, ttFont):
writer.simpletag('imageSize', value=self.imageSize)
writer.newline()
self.metrics.toXML(writer, ttFont)
def readMetrics(self, name, attrs, content, ttFont):
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'imageSize':
self.imageSize = safeEval(attrs['value'])
elif name == BigGlyphMetrics.__name__:
self.metrics = BigGlyphMetrics()
self.metrics.fromXML(name, attrs, content, ttFont)
elif name == SmallGlyphMetrics.__name__:
log.warning("SmallGlyphMetrics being ignored in format %d.", self.indexFormat)
def padBitmapData(self, data):
# Make sure that the data isn't bigger than the fixed size.
assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
pad = (self.imageSize - len(data)) * b'\0'
return data + pad
class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
pass
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
def decompile(self):
(self.imageSize,) = struct.unpack(">L", self.data[:4])
self.metrics = BigGlyphMetrics()
sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
del self.data, self.ttFont
def compile(self, ttFont):
glyphIds = list(map(ttFont.getGlyphID, self.names))
assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
self.imageDataOffset = min(next(iter(zip(*self.locations))))
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", self.imageSize))
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
return bytesjoin(dataList)
class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
pass
class eblc_index_sub_table_4(EblcIndexSubTable):
def decompile(self):
(numGlyphs,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations]
glyphIds, offsets = list(map(list, zip(*glyphArray)))
glyphIds.pop()
offsets = [offset + self.imageDataOffset for offset in offsets]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
del self.data, self.ttFont
def compile(self, ttFont):
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4"
offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
self.imageDataOffset = min(offsets)
offsets = [offset - self.imageDataOffset for offset in offsets]
glyphIds = list(map(ttFont.getGlyphID, self.names))
idsPlusPad = list(itertools.chain(glyphIds, [0]))
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", len(glyphIds)))
tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)]
dataList += tmp
data = bytesjoin(dataList)
return data
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
	# Format 5: fixed-size images (every glyph occupies imageSize bytes)
	# with shared big glyph metrics and an explicit list of glyph ids.
	def decompile(self):
		self.origDataLen = 0
		(self.imageSize,) = struct.unpack(">L", self.data[:4])
		data = self.data[4:]
		self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics())
		(numGlyphs,) = struct.unpack(">L", data[:4])
		data = data[4:]
		glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)]
		# Fixed-size format: glyph i starts at imageSize * i.
		offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
		self.locations = list(zip(offsets, offsets[1:]))
		self.names = list(map(self.ttFont.getGlyphName, glyphIds))
		# Decompilation done: release the raw buffer and the font handle.
		del self.data, self.ttFont
	def compile(self, ttFont):
		self.imageDataOffset = min(next(iter(zip(*self.locations))))
		dataList = [EblcIndexSubTable.compile(self, ttFont)]
		dataList.append(struct.pack(">L", self.imageSize))
		dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
		glyphIds = list(map(ttFont.getGlyphID, self.names))
		dataList.append(struct.pack(">L", len(glyphIds)))
		dataList += [struct.pack(">H", curId) for curId in glyphIds]
		# Odd number of 16-bit ids: append a zero pad (keeps the array at
		# an even, presumably 32-bit-aligned, length).
		if len(glyphIds) % 2 == 1:
			dataList.append(struct.pack(">H", 0))
		return bytesjoin(dataList)
# Map of indexSubTable format number to the class that handles it.
eblc_sub_table_classes = {
		1: eblc_index_sub_table_1,
		2: eblc_index_sub_table_2,
		3: eblc_index_sub_table_3,
		4: eblc_index_sub_table_4,
		5: eblc_index_sub_table_5,
	}
| true | true |
f71ae98a1105d01fa234bdf25931e02f5302919f | 1,327 | py | Python | interface/gaussian.py | R-Laurent/detection | 66823e8664b66caadef2ee35ee197fd9a5066f56 | [
"MIT"
] | null | null | null | interface/gaussian.py | R-Laurent/detection | 66823e8664b66caadef2ee35ee197fd9a5066f56 | [
"MIT"
] | null | null | null | interface/gaussian.py | R-Laurent/detection | 66823e8664b66caadef2ee35ee197fd9a5066f56 | [
"MIT"
] | null | null | null | def generate_gaussianFile(geom, grid, logger, outdir="./", igrid=0, maxbq=200):
gaussianfile = outdir + \
"input_batch_{:05d}.com".format(igrid)
f = open(gaussianfile, "w")
# f.write("%OldChk=/home/aartigas/chk/molecule_spe.chk\n".format())
f.write("%nproc=8\n".format())
f.write("%mem=1000MB\n".format())
# f.write("#P b3lyp/6-311++G(d,p) SCF(Tight) CPHF(Separate) Int(Grid=SuperFine) Guess=Read NMR geom=connectivity\n\nTitle\n\n0 1\n".format())
f.write("#P b3lyp/6-311++G(d,p) SCF(Tight) CPHF(Separate) Int(Grid=SuperFine) NMR geom=connectivity\n\nTitle\n\n0 1\n".format())
nat = 0
for at in geom.atoms:
f.write("{:4s} {:16.10f} {:16.10f} {:16.10f}\n".format(at['label'], at['x'], at['y'], at['z']))
nat = nat + 1
nbq = 0
for at in grid[igrid:]:
f.write(
"Bq {0[0]:16.10f} {0[1]:16.10f} {0[2]:16.10f}\n".format(at))
nbq = nbq + 1
nat = nat + 1
igrid = igrid + 1
if (nbq == maxbq):
logger.info("Batch generation : {}".format(igrid))
generate_gaussianFile(
geom, grid, logger, outdir=outdir, igrid=igrid, maxbq = maxbq)
break
f.write("\n")
for i in range(nat):
f.write("{}\n".format(i + 1))
f.write("\n")
f.close()
return
| 39.029412 | 144 | 0.553881 | def generate_gaussianFile(geom, grid, logger, outdir="./", igrid=0, maxbq=200):
gaussianfile = outdir + \
"input_batch_{:05d}.com".format(igrid)
f = open(gaussianfile, "w")
f.write("%nproc=8\n".format())
f.write("%mem=1000MB\n".format())
f.write("#P b3lyp/6-311++G(d,p) SCF(Tight) CPHF(Separate) Int(Grid=SuperFine) NMR geom=connectivity\n\nTitle\n\n0 1\n".format())
nat = 0
for at in geom.atoms:
f.write("{:4s} {:16.10f} {:16.10f} {:16.10f}\n".format(at['label'], at['x'], at['y'], at['z']))
nat = nat + 1
nbq = 0
for at in grid[igrid:]:
f.write(
"Bq {0[0]:16.10f} {0[1]:16.10f} {0[2]:16.10f}\n".format(at))
nbq = nbq + 1
nat = nat + 1
igrid = igrid + 1
if (nbq == maxbq):
logger.info("Batch generation : {}".format(igrid))
generate_gaussianFile(
geom, grid, logger, outdir=outdir, igrid=igrid, maxbq = maxbq)
break
f.write("\n")
for i in range(nat):
f.write("{}\n".format(i + 1))
f.write("\n")
f.close()
return
| true | true |
f71aea3701b57a737d5aa551497f3aa64313bab4 | 6,197 | py | Python | mmdet/models/dense_heads/pisa_retinanet_head.py | zactodd/mmdetection | 9596b9a4c916ae601f9a8a641c3a0ea47265abec | [
"Apache-2.0"
] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | mmdet/models/dense_heads/pisa_retinanet_head.py | wondervictor/lvis-mmdet | 68532eb6f4643ddf0179a4384c8c9e004a2c1d07 | [
"Apache-2.0"
] | 136 | 2021-07-11T11:26:54.000Z | 2022-03-31T02:45:34.000Z | mmdet/models/dense_heads/pisa_retinanet_head.py | wondervictor/lvis-mmdet | 68532eb6f4643ddf0179a4384c8c9e004a2c1d07 | [
"Apache-2.0"
] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z | import torch
from mmdet.core import force_fp32, images_to_levels
from ..builder import HEADS
from ..losses import carl_loss, isr_p
from .retina_head import RetinaHead
@HEADS.register_module()
class PISARetinaHead(RetinaHead):
    """PISA Retinanet Head.

    The head owns the same structure with Retinanet Head, but differs in two
    aspects:
    1. Importance-based Sample Reweighting Positive (ISR-P) is applied to
        change the positive loss weights.
    2. Classification-aware regression loss (CARL) is adopted as a third loss.
    """

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
                with shape (num_obj, 4).
            gt_labels (list[Tensor]): Ground truth labels of each image
                with shape (num_obj, ).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
                Default: None.

        Returns:
            dict: Loss dict, comprise classification loss, regression loss and
                carl loss.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device

        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        # return_sampling_results=True: ISR-P below needs the raw sampling
        # results in addition to the usual targets.
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            return_sampling_results=True)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
        num_total_samples = (
            num_total_pos + num_total_neg if self.sampling else num_total_pos)

        # anchor number of multi levels
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        # concat all level anchors and flags to a single tensor
        concat_anchor_list = []
        for i in range(len(anchor_list)):
            concat_anchor_list.append(torch.cat(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list,
                                           num_level_anchors)

        # Flatten per-level predictions/targets to (num_total_anchors, C)
        # so a single loss call covers every FPN level at once.
        num_imgs = len(img_metas)
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)
            for cls_score in cls_scores
        ]
        flatten_cls_scores = torch.cat(
            flatten_cls_scores, dim=1).reshape(-1,
                                               flatten_cls_scores[0].size(-1))
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_bbox_preds = torch.cat(
            flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))
        flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)
        flatten_label_weights = torch.cat(
            label_weights_list, dim=1).reshape(-1)
        flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)
        flatten_bbox_targets = torch.cat(
            bbox_targets_list, dim=1).reshape(-1, 4)
        flatten_bbox_weights = torch.cat(
            bbox_weights_list, dim=1).reshape(-1, 4)

        # Apply ISR-P: re-weight positive samples by importance. Runs under
        # no_grad because it only rewrites the target weights, not the graph.
        isr_cfg = self.train_cfg.get('isr', None)
        if isr_cfg is not None:
            all_targets = (flatten_labels, flatten_label_weights,
                           flatten_bbox_targets, flatten_bbox_weights)
            with torch.no_grad():
                all_targets = isr_p(
                    flatten_cls_scores,
                    flatten_bbox_preds,
                    all_targets,
                    flatten_anchors,
                    sampling_results_list,
                    bbox_coder=self.bbox_coder,
                    loss_cls=self.loss_cls,
                    num_class=self.num_classes,
                    **self.train_cfg.isr)
            (flatten_labels, flatten_label_weights, flatten_bbox_targets,
             flatten_bbox_weights) = all_targets

        # For convenience we compute loss once instead separating by fpn level,
        # so that we don't need to separate the weights by level again.
        # The result should be the same
        losses_cls = self.loss_cls(
            flatten_cls_scores,
            flatten_labels,
            flatten_label_weights,
            avg_factor=num_total_samples)
        losses_bbox = self.loss_bbox(
            flatten_bbox_preds,
            flatten_bbox_targets,
            flatten_bbox_weights,
            avg_factor=num_total_samples)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)

        # CARL Loss: classification-aware regression loss, added as a third
        # term when configured in train_cfg.
        carl_cfg = self.train_cfg.get('carl', None)
        if carl_cfg is not None:
            loss_carl = carl_loss(
                flatten_cls_scores,
                flatten_labels,
                flatten_bbox_preds,
                flatten_bbox_targets,
                self.loss_bbox,
                **self.train_cfg.carl,
                avg_factor=num_total_pos,
                sigmoid=True,
                num_class=self.num_classes)
            loss_dict.update(loss_carl)
        return loss_dict
| 40.24026 | 79 | 0.598031 | import torch
from mmdet.core import force_fp32, images_to_levels
from ..builder import HEADS
from ..losses import carl_loss, isr_p
from .retina_head import RetinaHead
@HEADS.register_module()
class PISARetinaHead(RetinaHead):
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
return_sampling_results=True)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
num_total_samples = (
num_total_pos + num_total_neg if self.sampling else num_total_pos)
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
concat_anchor_list = []
for i in range(len(anchor_list)):
concat_anchor_list.append(torch.cat(anchor_list[i]))
all_anchor_list = images_to_levels(concat_anchor_list,
num_level_anchors)
num_imgs = len(img_metas)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)
for cls_score in cls_scores
]
flatten_cls_scores = torch.cat(
flatten_cls_scores, dim=1).reshape(-1,
flatten_cls_scores[0].size(-1))
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
for bbox_pred in bbox_preds
]
flatten_bbox_preds = torch.cat(
flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))
flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)
flatten_label_weights = torch.cat(
label_weights_list, dim=1).reshape(-1)
flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)
flatten_bbox_targets = torch.cat(
bbox_targets_list, dim=1).reshape(-1, 4)
flatten_bbox_weights = torch.cat(
bbox_weights_list, dim=1).reshape(-1, 4)
isr_cfg = self.train_cfg.get('isr', None)
if isr_cfg is not None:
all_targets = (flatten_labels, flatten_label_weights,
flatten_bbox_targets, flatten_bbox_weights)
with torch.no_grad():
all_targets = isr_p(
flatten_cls_scores,
flatten_bbox_preds,
all_targets,
flatten_anchors,
sampling_results_list,
bbox_coder=self.bbox_coder,
loss_cls=self.loss_cls,
num_class=self.num_classes,
**self.train_cfg.isr)
(flatten_labels, flatten_label_weights, flatten_bbox_targets,
flatten_bbox_weights) = all_targets
# The result should be the same
losses_cls = self.loss_cls(
flatten_cls_scores,
flatten_labels,
flatten_label_weights,
avg_factor=num_total_samples)
losses_bbox = self.loss_bbox(
flatten_bbox_preds,
flatten_bbox_targets,
flatten_bbox_weights,
avg_factor=num_total_samples)
loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
# CARL Loss
carl_cfg = self.train_cfg.get('carl', None)
if carl_cfg is not None:
loss_carl = carl_loss(
flatten_cls_scores,
flatten_labels,
flatten_bbox_preds,
flatten_bbox_targets,
self.loss_bbox,
**self.train_cfg.carl,
avg_factor=num_total_pos,
sigmoid=True,
num_class=self.num_classes)
loss_dict.update(loss_carl)
return loss_dict
| true | true |
f71aebb3c6779bbbbac6736bbbca965e3ddbbe88 | 4,718 | py | Python | diff_representation/model/edit_encoder/bag_of_edits_change_encoder.py | microsoft/iclr2019-learning-to-represent-edits | e5777d6aa6cdeda500cf076646177c48d1cb4622 | [
"MIT"
] | 8 | 2021-03-15T18:57:18.000Z | 2021-08-23T11:28:22.000Z | diff_representation/model/edit_encoder/bag_of_edits_change_encoder.py | microsoft/iclr2019-learning-to-represent-edits | e5777d6aa6cdeda500cf076646177c48d1cb4622 | [
"MIT"
] | null | null | null | diff_representation/model/edit_encoder/bag_of_edits_change_encoder.py | microsoft/iclr2019-learning-to-represent-edits | e5777d6aa6cdeda500cf076646177c48d1cb4622 | [
"MIT"
] | 4 | 2021-03-27T14:19:09.000Z | 2021-09-13T12:35:31.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from itertools import chain
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from tqdm import tqdm
import sys
from diff_representation.change_entry import ChangeExample
from diff_representation.model import nn_utils
from diff_representation.model.embedder import EmbeddingTable
class BagOfEditsChangeEncoder(nn.Module):
    """Project a code change into a distributed vector.

    The change vector is a "bag of edits": the sum of the embeddings of all
    added tokens concatenated with the sum of the embeddings of all deleted
    tokens, hence ``change_vector_size == 2 * token_embedding_size``.
    """

    def __init__(self, token_embedder, vocab, **kwargs):
        # token_embedder: an embedding module whose ``weight`` is the
        # (vocab_size, embed_size) lookup table; vocab maps token -> id.
        super(BagOfEditsChangeEncoder, self).__init__()

        self.token_embedder = token_embedder
        self.token_embedding_size = self.token_embedder.weight.size(1)
        self.vocab = vocab
        # [sum of added embeddings ; sum of deleted embeddings]
        self.change_vector_size = self.token_embedding_size * 2

    @property
    def device(self):
        # Follow whatever device the embedding table lives on.
        return self.token_embedder.device

    def forward(self, code_changes, *args, **kwargs):
        """Encode a batch of code changes.

        For every example, tokens tagged ADD/DEL in ``change_seq`` are
        collected (a REPLACE contributes one added and one deleted token),
        their embeddings are summed per example with ``scatter_add_``, and
        the added-sum and deleted-sum are concatenated.

        Returns:
            Tensor of shape (len(code_changes), 2 * token_embedding_size).
        """
        added_tokens = []
        added_token_batch_ids = []
        deled_tokens = []
        deled_token_batch_ids = []
        for e_id, example in enumerate(code_changes):
            for entry in example.change_seq:
                tag, token = entry
                if tag == 'ADD':
                    token_id = self.vocab[token]
                    added_tokens.append(token_id)
                    added_token_batch_ids.append(e_id)
                elif tag == 'DEL':
                    token_id = self.vocab[token]
                    deled_tokens.append(token_id)
                    deled_token_batch_ids.append(e_id)
                elif tag == 'REPLACE':
                    # token is a (old, new) pair: count as one DEL + one ADD.
                    added_token_id = self.vocab[token[1]]
                    deled_token_id = self.vocab[token[0]]
                    added_tokens.append(added_token_id)
                    deled_tokens.append(deled_token_id)
                    added_token_batch_ids.append(e_id)
                    deled_token_batch_ids.append(e_id)

        # One embedding lookup for added and deleted tokens together.
        changed_token_ids = added_tokens + deled_tokens
        changed_token_ids = torch.tensor(changed_token_ids, dtype=torch.long, device=self.device)
        # (token_num, embed_size)
        changed_token_embeds = self.token_embedder.weight[changed_token_ids]
        added_token_embeds = changed_token_embeds[:len(added_tokens)]
        deled_token_embeds = changed_token_embeds[len(added_tokens):]

        # Sum added-token embeddings into their example's row.
        added_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,
                                          device=self.device)
        if added_token_batch_ids:
            added_change_embeds = added_change_embeds.scatter_add_(0,
                torch.tensor(added_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(added_token_embeds),
                added_token_embeds)

        # Sum deleted-token embeddings into their example's row.
        deled_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,
                                          device=self.device)
        if deled_token_batch_ids:
            deled_change_embeds = deled_change_embeds.scatter_add_(0,
                torch.tensor(deled_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(deled_token_embeds),
                deled_token_embeds)

        change_vectors = torch.cat([added_change_embeds, deled_change_embeds], dim=-1)

        return change_vectors

    def encode_code_change(self, prev_code_tokens, updated_code_tokens, code_encoder):
        """Encode a single change; returns a 1D numpy array."""
        example = ChangeExample(prev_code_tokens, updated_code_tokens, context=None)
        change_vec = self.forward([example]).data.cpu().numpy()[0]
        return change_vec

    def encode_code_changes(self, examples, code_encoder, batch_size=32):
        """Encode each change in the list `examples`,
        return a 2D numpy array of shape (len(examples), code_change_embed_dim)."""
        change_vecs = []
        for batch_examples in tqdm(nn_utils.batch_iter(examples, batch_size), file=sys.stdout, total=len(examples)):
            batch_change_vecs = self.forward(batch_examples).data.cpu().numpy()
            change_vecs.append(batch_change_vecs)
        change_vecs = np.concatenate(change_vecs, axis=0)
        return change_vecs
| 42.504505 | 167 | 0.636922 |
from itertools import chain
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from tqdm import tqdm
import sys
from diff_representation.change_entry import ChangeExample
from diff_representation.model import nn_utils
from diff_representation.model.embedder import EmbeddingTable
class BagOfEditsChangeEncoder(nn.Module):
def __init__(self, token_embedder, vocab, **kwargs):
super(BagOfEditsChangeEncoder, self).__init__()
self.token_embedder = token_embedder
self.token_embedding_size = self.token_embedder.weight.size(1)
self.vocab = vocab
self.change_vector_size = self.token_embedding_size * 2
@property
def device(self):
return self.token_embedder.device
def forward(self, code_changes, *args, **kwargs):
added_tokens = []
added_token_batch_ids = []
deled_tokens = []
deled_token_batch_ids = []
for e_id, example in enumerate(code_changes):
for entry in example.change_seq:
tag, token = entry
if tag == 'ADD':
token_id = self.vocab[token]
added_tokens.append(token_id)
added_token_batch_ids.append(e_id)
elif tag == 'DEL':
token_id = self.vocab[token]
deled_tokens.append(token_id)
deled_token_batch_ids.append(e_id)
elif tag == 'REPLACE':
added_token_id = self.vocab[token[1]]
deled_token_id = self.vocab[token[0]]
added_tokens.append(added_token_id)
deled_tokens.append(deled_token_id)
added_token_batch_ids.append(e_id)
deled_token_batch_ids.append(e_id)
changed_token_ids = added_tokens + deled_tokens
changed_token_ids = torch.tensor(changed_token_ids, dtype=torch.long, device=self.device)
changed_token_embeds = self.token_embedder.weight[changed_token_ids]
added_token_embeds = changed_token_embeds[:len(added_tokens)]
deled_token_embeds = changed_token_embeds[len(added_tokens):]
added_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,
device=self.device)
if added_token_batch_ids:
added_change_embeds = added_change_embeds.scatter_add_(0,
torch.tensor(added_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(added_token_embeds),
added_token_embeds)
deled_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,
device=self.device)
if deled_token_batch_ids:
deled_change_embeds = deled_change_embeds.scatter_add_(0,
torch.tensor(deled_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(deled_token_embeds),
deled_token_embeds)
change_vectors = torch.cat([added_change_embeds, deled_change_embeds], dim=-1)
return change_vectors
def encode_code_change(self, prev_code_tokens, updated_code_tokens, code_encoder):
example = ChangeExample(prev_code_tokens, updated_code_tokens, context=None)
change_vec = self.forward([example]).data.cpu().numpy()[0]
return change_vec
def encode_code_changes(self, examples, code_encoder, batch_size=32):
change_vecs = []
for batch_examples in tqdm(nn_utils.batch_iter(examples, batch_size), file=sys.stdout, total=len(examples)):
batch_change_vecs = self.forward(batch_examples).data.cpu().numpy()
change_vecs.append(batch_change_vecs)
change_vecs = np.concatenate(change_vecs, axis=0)
return change_vecs
| true | true |
f71aebc2afaca9f74e0aad77ccea915a36978cb2 | 1,995 | py | Python | examples/classify_capture.py | tbeatty/edgetpu | 14237f65ba07b7b1d8287e9f60dd20c88562871a | [
"Apache-2.0"
] | 10 | 2019-04-12T08:02:12.000Z | 2020-12-27T13:53:37.000Z | examples/classify_capture.py | tbeatty/edgetpu | 14237f65ba07b7b1d8287e9f60dd20c88562871a | [
"Apache-2.0"
] | 1 | 2019-04-03T12:22:55.000Z | 2019-04-04T10:42:35.000Z | examples/classify_capture.py | tbeatty/edgetpu | 14237f65ba07b7b1d8287e9f60dd20c88562871a | [
"Apache-2.0"
] | 3 | 2019-04-25T13:44:36.000Z | 2021-02-17T06:00:56.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo to classify Raspberry Pi camera stream."""
import argparse
import io
import time
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils
import numpy as np
import picamera
def main():
  """Classify the live Pi-camera stream and overlay the top result."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--model', help='File path of Tflite model.', required=True)
  parser.add_argument('--label', help='File path of label file.', required=True)
  args = parser.parse_args()

  engine = ClassificationEngine(args.model)
  labels = dataset_utils.read_label_file(args.label)

  with picamera.PiCamera() as camera:
    camera.resolution = (640, 480)
    camera.framerate = 30
    _, height, width, _ = engine.get_input_tensor_shape()
    camera.start_preview()
    try:
      frame_buf = io.BytesIO()
      frames = camera.capture_continuous(
          frame_buf, format='rgb', use_video_port=True, resize=(width, height))
      for _ in frames:
        frame_buf.truncate()
        frame_buf.seek(0)
        pixels = np.frombuffer(frame_buf.getvalue(), dtype=np.uint8)
        tick = time.time()
        results = engine.classify_with_input_tensor(pixels, top_k=1)
        latency = time.time() - tick
        if results:
          top_id, score = results[0][0], results[0][1]
          camera.annotate_text = '%s %.2f\n%.2fms' % (
              labels[top_id], score, latency * 1000.0)
    finally:
      camera.stop_preview()
# Script entry point.
if __name__ == '__main__':
  main()
| 33.25 | 80 | 0.702757 |
import argparse
import io
import time
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils
import numpy as np
import picamera
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', help='File path of Tflite model.', required=True)
parser.add_argument('--label', help='File path of label file.', required=True)
args = parser.parse_args()
labels = dataset_utils.read_label_file(args.label)
engine = ClassificationEngine(args.model)
with picamera.PiCamera() as camera:
camera.resolution = (640, 480)
camera.framerate = 30
_, height, width, _ = engine.get_input_tensor_shape()
camera.start_preview()
try:
stream = io.BytesIO()
for _ in camera.capture_continuous(
stream, format='rgb', use_video_port=True, resize=(width, height)):
stream.truncate()
stream.seek(0)
input_tensor = np.frombuffer(stream.getvalue(), dtype=np.uint8)
start_ms = time.time()
results = engine.classify_with_input_tensor(input_tensor, top_k=1)
elapsed_ms = time.time() - start_ms
if results:
camera.annotate_text = '%s %.2f\n%.2fms' % (
labels[results[0][0]], results[0][1], elapsed_ms * 1000.0)
finally:
camera.stop_preview()
if __name__ == '__main__':
main()
| true | true |
f71aec18d787da6ff5ca5c22add4823da1992bf0 | 718 | py | Python | Solution/0081.Search_in_Rotated_Sorted_Array_II/0081.Search_in_Rotated_Sorted_Array_II.py | xleslie/LeetCode | 0af08817b3922e1bbc558091963fd4ff65a506ea | [
"MIT"
] | null | null | null | Solution/0081.Search_in_Rotated_Sorted_Array_II/0081.Search_in_Rotated_Sorted_Array_II.py | xleslie/LeetCode | 0af08817b3922e1bbc558091963fd4ff65a506ea | [
"MIT"
] | null | null | null | Solution/0081.Search_in_Rotated_Sorted_Array_II/0081.Search_in_Rotated_Sorted_Array_II.py | xleslie/LeetCode | 0af08817b3922e1bbc558091963fd4ff65a506ea | [
"MIT"
] | null | null | null | class Solution:
def search(self, nums: List[int], target: int) -> bool:
i,j=0,len(nums)
while i<j:
m=i+(j-i)//2
if nums[m]==target: return True
if nums[m]>nums[i]:
if target>=nums[i] and target<nums[m]:
j=m
else:
i=m+1
elif nums[m]<nums[i]:
if target>nums[m] and target <=nums[j-1]:
i=m+1
else:
j=m
elif nums[m]==nums[i]:
while i<j and nums[i] == nums[m]:
i+=1
while i<j and nums[j-1] ==nums[m]:
j-=1
return False
| 31.217391 | 59 | 0.367688 | class Solution:
def search(self, nums: List[int], target: int) -> bool:
i,j=0,len(nums)
while i<j:
m=i+(j-i)//2
if nums[m]==target: return True
if nums[m]>nums[i]:
if target>=nums[i] and target<nums[m]:
j=m
else:
i=m+1
elif nums[m]<nums[i]:
if target>nums[m] and target <=nums[j-1]:
i=m+1
else:
j=m
elif nums[m]==nums[i]:
while i<j and nums[i] == nums[m]:
i+=1
while i<j and nums[j-1] ==nums[m]:
j-=1
return False
| true | true |
f71aedc6bc7df22a8c1ea544a471fb0c4efcfc27 | 1,844 | py | Python | tvm_test/run_simple_mod_op2_pth.py | junarwohn/tvm | 96c2e06cd063a695b3b485f2bdf8875df55fff1a | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tvm_test/run_simple_mod_op2_pth.py | junarwohn/tvm | 96c2e06cd063a695b3b485f2bdf8875df55fff1a | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tvm_test/run_simple_mod_op2_pth.py | junarwohn/tvm | 96c2e06cd063a695b3b485f2bdf8875df55fff1a | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | import tvm
from tvm import relay
from tvm import relay
from tvm.runtime.vm import VirtualMachine
from tvm.contrib.download import download_testdata
from SimpleModel import Net
import numpy as np
import cv2
# PyTorch imports
import torch
import torchvision
# Time library for speed check
import time
# Model input geometry: a single RGB image in NCHW layout.
in_size = 32
input_shape = (1, 3, in_size, in_size)
def do_trace(model, inp):
    """Jit-trace *model* on the example input *inp* and return the traced
    module switched to eval mode."""
    # Module.eval() returns the module itself, so trace and mode switch chain.
    return torch.jit.trace(model, inp).eval()
# model_func = torchvision.models.detection.maskrcnn_resnet50_fpn
# model = TraceWrapper(model_func(pretrained=True))
# Load the trained SimpleModel weights and trace it for TVM import.
model = Net()
model.load_state_dict(torch.load('./simple_mod.pth'))
model.eval()
inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))
with torch.no_grad():
    out = model(inp)
    script_module = do_trace(model, inp)
# Fetch a small test image and preprocess it to NCHW float in [0, 1].
img_url = (
    "https://raw.githubusercontent.com/dmlc/web-data/" "master/gluoncv/detection/street_small.jpg"
)
img_path = download_testdata(img_url, "test_street_small.jpg", module="data")
img = cv2.imread(img_path).astype("float32")
img = cv2.resize(img, (in_size, in_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img / 255.0, [2, 0, 1])
img = np.expand_dims(img, axis=0)
# Import the traced module into Relay and compile for the CPU VM.
input_name = "input0"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(script_module, shape_list)
target = "llvm"
with tvm.transform.PassContext(opt_level=2, disabled_pass=["FoldScaleAxis"]):
    vm_exec = relay.vm.compile(mod, target=target, params=params)
# dev = tvm.cuda()
dev = tvm.cpu()
vm = VirtualMachine(vm_exec, dev)
vm.set_input("main", **{input_name: img})
# Time a single VM inference.
inference_start = time.time()
tvm_res = vm.run()
inference_end = time.time()
inference_time_tvm = inference_end - inference_start
# NOTE(review): "Infernece" is a typo in the printed message (runtime
# string, left unchanged here).
print("Infernece Time : {}".format(inference_time_tvm))
from tvm import relay
from tvm import relay
from tvm.runtime.vm import VirtualMachine
from tvm.contrib.download import download_testdata
from SimpleModel import Net
import numpy as np
import cv2
import torch
import torchvision
import time
in_size = 32
input_shape = (1, 3, in_size, in_size)
def do_trace(model, inp):
model_trace = torch.jit.trace(model, inp)
model_trace.eval()
return model_trace
model = Net()
model.load_state_dict(torch.load('./simple_mod.pth'))
model.eval()
inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))
with torch.no_grad():
out = model(inp)
script_module = do_trace(model, inp)
img_url = (
"https://raw.githubusercontent.com/dmlc/web-data/" "master/gluoncv/detection/street_small.jpg"
)
img_path = download_testdata(img_url, "test_street_small.jpg", module="data")
img = cv2.imread(img_path).astype("float32")
img = cv2.resize(img, (in_size, in_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img / 255.0, [2, 0, 1])
img = np.expand_dims(img, axis=0)
input_name = "input0"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(script_module, shape_list)
target = "llvm"
with tvm.transform.PassContext(opt_level=2, disabled_pass=["FoldScaleAxis"]):
vm_exec = relay.vm.compile(mod, target=target, params=params)
dev = tvm.cpu()
vm = VirtualMachine(vm_exec, dev)
vm.set_input("main", **{input_name: img})
inference_start = time.time()
tvm_res = vm.run()
inference_end = time.time()
inference_time_tvm = inference_end - inference_start
print("Infernece Time : {}".format(inference_time_tvm))
| true | true |
f71aeff53f61023328ab2b142760b44cd1843c8c | 6,750 | py | Python | tests/test_etdrk4fxcy.py | liuyxpp/chebpy | 05a9492d0d78591a39923e4a85a0f24bcc79ae4f | [
"BSD-3-Clause"
] | null | null | null | tests/test_etdrk4fxcy.py | liuyxpp/chebpy | 05a9492d0d78591a39923e4a85a0f24bcc79ae4f | [
"BSD-3-Clause"
] | null | null | null | tests/test_etdrk4fxcy.py | liuyxpp/chebpy | 05a9492d0d78591a39923e4a85a0f24bcc79ae4f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#/usr/bin/env python
import numpy as np
import matplotlib.pylab as plt
from timer import Timer
from chebpy import ETDRK4FxCy, ETDRK4FxCy2, BC, ETDRK4
from chebpy import ROBIN, DIRICHLET
def test_etdrk4fxcy():
    # NOTE(review): this function is Python 2 code (`xrange`, print
    # statements) and has interactive matplotlib side effects; it is a
    # manual verification script rather than an automated unit test.
    '''
    The test function is
        u = e^[f(x,y) - t]
    where
        f(x,y) = h(x) + g(y)
    Assume it is the solution of following PDE
        du/dt = (d^2/dx^2 + d^2/dy^2) u - w(x,y)u
    in the domain [0,Lx]x[0,Ly] for time t=0 to t=1,
    with boundary conditions
        u(x+Lx,y,t) = u(x,y,t) # periodic in x direction
        d/dy[u(x,y=0,t)] = ka u(y=0)
        d/dy[u(x,y=Ly,t)] = kb u(y=Ly)
    To generate a suitable solution, we assume
        h(x) = sin(x)
        h_x = dh/dx = cos(x)
        h_xx = d^2h/dx^2 = -sin(x)
    since it is periodic in x direction.
    The corresponding w(x,y) is
        w(x,y) = h_xx + g_yy + (h_x)^2 + (g_y)^2 + 1
    1. For homogeneous NBC (ka=kb=0), a suitable g(y) is
        g(y) = Ay^2(2y-3)/6
        g_y = A(y^2-y) # g_y(y=0)=0, g_y(y=1)=0
        g_yy = A(2*y-1)
    where A is a positive constant.
        Lx = 2*pi, Ly = 1.0, Nx = 64, Ny =32, Ns = 21
    is a good parameter set. Note the time step ds = 1/(Ns-1) = 0.05 is very
    large.
    2. For homogeneous DBC, an approximate g(y) is
        g(y) = -A(y-1)^2
        g_y = -2A(y-1)
        g_yy = -2A
    where A is a positive and large constant.
        Lx = 2*pi, Ly = 2.0, Nx = 64, Ny =32, Ns = 101
    is a good parameter set.
    3. For RBC, g(y) is given by
        g(y) = -Ay
        g_y = -A # ka=kb=-A
        g_yy = 0
    A is a positive constant.
    Numerical result is different than the analytical one.
    '''
    # Discretization parameters.
    Lx = 2*np.pi # x [0, Lx]
    Nx = 64
    Ly = 1.0 # y [0, Ly]
    Ny = 127
    Ns = 101
    ds = 1. / (Ns - 1)

    # Periodic in x direction, Fourier
    xx = np.arange(Nx) * Lx / Nx
    # Non-periodic in y direction, Chebyshev
    ii = np.arange(Ny+1)
    yy = np.cos(np.pi * ii / Ny) # yy [-1, 1]
    yy = 0.5 * (yy + 1) * Ly # mapping to [0, Ly]

    # Build the potential w(x, y), the initial field q[0] and the exact
    # solution q_exact for the homogeneous-NBC test case.
    w = np.zeros([Nx,Ny+1])
    A = 1.0
    q = np.zeros([Ns, Nx, Ny+1])
    q_exact = np.zeros([Nx, Ny+1])
    #q[0] = 1.
    for i in xrange(Nx):
        for j in xrange(Ny+1):
            x = xx[i]
            y = yy[j]
            # RBC
            #q_exact[i,j] = np.exp(-A*y + np.sin(x) - 1)
            #q[0,i,j] = np.exp(-A*y + np.sin(x))
            #w[i,j] = np.cos(x)**2 - np.sin(x) + A**2 + 1
            # homogeneous NBC
            q_exact[i,j] = np.exp(A*y**2*(2*y-3)/6 + np.sin(x) - 1)
            q[0,i,j] = np.exp(A*y**2*(2*y-3)/6 + np.sin(x))
            w[i,j] = (A*y*(y-1))**2 + np.cos(x)**2 - np.sin(x) + A*(2*y-1) + 1
            # homogeneous DBC
            #q[0,i,j] = np.exp(-A*(y-1)**2 + np.sin(x))
            #q_exact[i,j] = np.exp(-A*(y-1)**2 + np.sin(x) + 1)
            #w[i, j] = np.cos(x)**2 - np.sin(x) + 4*A**2 + (2*A*(y-1))**2 + 1
            # Fredrickson
            #sech = 1. / np.cosh(0.25*(6*y[j]-3*Ly))
            #w[i,j] = (1 - 2*sech**2)*(np.sin(2*np.pi*x[i]/Lx)+1)
            #w[i,j] = (1 - 2*sech**2)

    # Visual inspection of the potential field.
    x = xx; y = yy
    plt.imshow(w)
    plt.xlabel('w')
    plt.show()
    plt.plot(x,w[:,Ny/2])
    plt.xlabel('w(x)')
    plt.show()
    plt.plot(y,w[Nx/4,:])
    plt.xlabel('w(y)')
    plt.show()

    # Boundary-condition selection (active case: homogeneous Neumann).
    # DBC
    #lbc = BC(DIRICHLET, [0.0, 1.0, 0.0])
    #rbc = BC(DIRICHLET, [0.0, 1.0, 0.0])
    # RBC
    #lbc = BC(ROBIN, [1.0, A, 0.0])
    #rbc = BC(ROBIN, [1.0, A, 0.0])
    # NBC
    lbc = BC(ROBIN, [1.0, 0, 0.0])
    rbc = BC(ROBIN, [1.0, 0, 0.0])

    #q_solver = ETDRK4FxCy(Lx, Ly, Nx, Ny, Ns, h=ds, lbc=lbc, rbc=rbc)
    q_solver = ETDRK4FxCy2(Lx, Ly, Nx, Ny, Ns, h=ds, lbc=lbc, rbc=rbc)

    # Crude timing: repeat the full solve M times.
    M = 100 # Took 1117.6 x 4 seconds for cpu one core
    with Timer() as t:
        for m in xrange(M):
            q1 = q_solver.solve(w, q[0], q)
    print "100 runs took ", t.secs, " seconds."

    print 'Error =', np.max(np.abs(q1-q_exact))

    # Compare initial, computed, and exact fields.
    plt.imshow(q[0])
    plt.xlabel('q_0')
    plt.show()
    plt.imshow(q1)
    plt.xlabel('q_solution')
    plt.show()
    plt.imshow(q_exact)
    plt.xlabel('q_exact')
    plt.show()
    plt.plot(x,q[0,:,Ny/2], label='q0')
    plt.plot(x,q1[:,Ny/2], label='q_solution')
    plt.plot(x,q_exact[:,Ny/2], label='q_exact')
    plt.legend(loc='best')
    plt.xlabel('q[:,Ny/2]')
    plt.show()
    plt.plot(y,q[0,Nx/4,:], label='q0')
    plt.plot(y,q1[Nx/4,:], label='q_solution')
    plt.plot(y,q_exact[Nx/4,:], label='q_exact')
    plt.legend(loc='best')
    plt.xlabel('q[Nx/4,:]')
    plt.show()
    plt.plot(y,q[0,Nx*3/4,:], label='q0')
    plt.plot(y,q1[Nx*3/4,:], label='q_solution')
    plt.plot(y,q_exact[Nx*3/4,:], label='q_exact')
    plt.legend(loc='best')
    plt.xlabel('q[Nx*3/4,:]')
    plt.show()

    exit()

    # NOTE(review): everything below exit() is dead code, kept for manual
    # 1D cross-checks against the plain ETDRK4 solver.
    # Check with ETDRK4
    sech = 1. / np.cosh(0.25*(6*y-3*Ly))
    w1 = 1 - 2*sech**2
    plt.plot(y,w1)
    plt.show()
    q = np.zeros([Ns, Ny+1])
    q[0] = 1.
    q_solver = ETDRK4(Ly,Ny,Ns,h=ds,lbc=lbc,rbc=rbc)
    q1, y = q_solver.solve(w1, q[0], q)
    plt.plot(y,q1)
    plt.show()
def test_etdrk4():
    '''
    Heat-equation benchmark following R. C. Daileda's lecture notes:
        du/dt = (1/25) u_xx,  x in (0, 3)
    subject to
        u(0, t)   = 0                  (Dirichlet at the left end)
        u_x(3, t) = -(1/2) u(3, t)     (Robin at the right end)
        u(x, 0)   = 100 * (1 - x/3)    (initial profile)
    Conclusion:
    The numerical solution is much more accurate than the five-term
    truncation of the exact analytical series solution.
    '''
    num_intervals = 64            # Chebyshev intervals in x
    domain_length = 3             # x spans [0, domain_length]
    t_final = 1.
    num_levels = 101              # stored time levels
    dt = t_final / (num_levels - 1)
    # Chebyshev-Gauss-Lobatto nodes on [-1, 1], mapped onto [0, domain_length].
    modes = np.arange(num_intervals + 1)
    x = np.cos(np.pi * modes / num_intervals)
    x = 0.5 * (x + 1) * domain_length
    w = np.zeros(num_intervals + 1)            # zero potential term
    q = np.zeros([num_levels, num_intervals + 1])
    q[0] = 100 * (1 - x / 3)                   # initial condition
    # Five-term approximation of the exact series solution at t = t_final.
    q_exact = (47.0449 * np.exp(-0.0210 * t_final) * np.sin(0.7249 * x)
               + 45.1413 * np.exp(-0.1113 * t_final) * np.sin(1.6679 * x)
               + 21.3586 * np.exp(-0.2872 * t_final) * np.sin(2.6795 * x)
               + 19.3403 * np.exp(-0.5505 * t_final) * np.sin(3.7098 * x)
               + 12.9674 * np.exp(-0.9015 * t_final) * np.sin(4.7474 * x))
    # Dirichlet on the left, Robin (u_x = -u/2) on the right.
    lbc = BC(DIRICHLET, [0, 1, 0])
    rbc = BC(ROBIN, [1., 0.5, 0])
    solver = ETDRK4(domain_length, num_intervals, num_levels,
                    h=dt, c=1. / 25, lbc=lbc, rbc=rbc)
    q1, x = solver.solve(w, q[0], q)
    plt.plot(x, q[0], label='q_0')
    plt.plot(x, q1, label='q_solution')
    plt.plot(x, q_exact, label='q_exact')
    plt.legend(loc='best')
    plt.show()
def check(u):
    '''
    Placeholder for a residual check of a candidate solution u of
        du/dt = u_xx + u_yy - w*u
    The (unfinished) intent is to evaluate a finite-difference residual,
    e.g. R(x) = (u(x+h) - ...), to measure how well u satisfies the PDE.
    Currently not implemented.
    '''
    pass
if __name__ == '__main__':
    # Run the 2D (Fourier x Chebyshev) ETDRK4 benchmark by default; the 1D
    # Chebyshev benchmark can be enabled instead.
    test_etdrk4fxcy()
    #test_etdrk4()
| 30.681818 | 79 | 0.493333 |
import numpy as np
import matplotlib.pylab as plt
from timer import Timer
from chebpy import ETDRK4FxCy, ETDRK4FxCy2, BC, ETDRK4
from chebpy import ROBIN, DIRICHLET
def test_etdrk4fxcy():
'''
The test function is
u = e^[f(x,y) - t]
where
f(x,y) = h(x) + g(y)
Assume it is the solution of following PDE
du/dt = (d^2/dx^2 + d^2/dy^2) u - w(x,y)u
in the domain [0,Lx]x[0,Ly] for time t=0 to t=1,
with boundary conditions
u(x+Lx,y,t) = u(x,y,t) # periodic in x direction
d/dy[u(x,y=0,t)] = ka u(y=0)
d/dy[u(x,y=Ly,t)] = kb u(y=Ly)
To generate a suitable solution, we assume
h(x) = sin(x)
h_x = dh/dx = cos(x)
h_xx = d^2h/dx^2 = -sin(x)
since it is periodic in x direction.
The corresponding w(x,y) is
w(x,y) = h_xx + g_yy + (h_x)^2 + (g_y)^2 + 1
1. For homogeneous NBC (ka=kb=0), a suitable g(y) is
g(y) = Ay^2(2y-3)/6
g_y = A(y^2-y) # g_y(y=0)=0, g_y(y=1)=0
g_yy = A(2*y-1)
where A is a positive constant.
Lx = 2*pi, Ly = 1.0, Nx = 64, Ny =32, Ns = 21
is a good parameter set. Note the time step ds = 1/(Ns-1) = 0.05 is very
large.
2. For homogeneous DBC, an approximate g(y) is
g(y) = -A(y-1)^2
g_y = -2A(y-1)
g_yy = -2A
where A is a positive and large constant.
Lx = 2*pi, Ly = 2.0, Nx = 64, Ny =32, Ns = 101
is a good parameter set.
3. For RBC, g(y) is given by
g(y) = -Ay
g_y = -A # ka=kb=-A
g_yy = 0
A is a positive constant.
Numerical result is different than the analytical one.
'''
Lx = 2*np.pi
Nx = 64
Ly = 1.0
Ny = 127
Ns = 101
ds = 1. / (Ns - 1)
xx = np.arange(Nx) * Lx / Nx
ii = np.arange(Ny+1)
yy = np.cos(np.pi * ii / Ny)
yy = 0.5 * (yy + 1) * Ly
w = np.zeros([Nx,Ny+1])
A = 1.0
q = np.zeros([Ns, Nx, Ny+1])
q_exact = np.zeros([Nx, Ny+1])
for i in xrange(Nx):
for j in xrange(Ny+1):
x = xx[i]
y = yy[j]
q_exact[i,j] = np.exp(A*y**2*(2*y-3)/6 + np.sin(x) - 1)
q[0,i,j] = np.exp(A*y**2*(2*y-3)/6 + np.sin(x))
w[i,j] = (A*y*(y-1))**2 + np.cos(x)**2 - np.sin(x) + A*(2*y-1) + 1
x = xx; y = yy
plt.imshow(w)
plt.xlabel('w')
plt.show()
plt.plot(x,w[:,Ny/2])
plt.xlabel('w(x)')
plt.show()
plt.plot(y,w[Nx/4,:])
plt.xlabel('w(y)')
plt.show()
lbc = BC(ROBIN, [1.0, 0, 0.0])
rbc = BC(ROBIN, [1.0, 0, 0.0])
q_solver = ETDRK4FxCy2(Lx, Ly, Nx, Ny, Ns, h=ds, lbc=lbc, rbc=rbc)
M = 100
with Timer() as t:
for m in xrange(M):
q1 = q_solver.solve(w, q[0], q)
print "100 runs took ", t.secs, " seconds."
print 'Error =', np.max(np.abs(q1-q_exact))
plt.imshow(q[0])
plt.xlabel('q_0')
plt.show()
plt.imshow(q1)
plt.xlabel('q_solution')
plt.show()
plt.imshow(q_exact)
plt.xlabel('q_exact')
plt.show()
plt.plot(x,q[0,:,Ny/2], label='q0')
plt.plot(x,q1[:,Ny/2], label='q_solution')
plt.plot(x,q_exact[:,Ny/2], label='q_exact')
plt.legend(loc='best')
plt.xlabel('q[:,Ny/2]')
plt.show()
plt.plot(y,q[0,Nx/4,:], label='q0')
plt.plot(y,q1[Nx/4,:], label='q_solution')
plt.plot(y,q_exact[Nx/4,:], label='q_exact')
plt.legend(loc='best')
plt.xlabel('q[Nx/4,:]')
plt.show()
plt.plot(y,q[0,Nx*3/4,:], label='q0')
plt.plot(y,q1[Nx*3/4,:], label='q_solution')
plt.plot(y,q_exact[Nx*3/4,:], label='q_exact')
plt.legend(loc='best')
plt.xlabel('q[Nx*3/4,:]')
plt.show()
exit()
sech = 1. / np.cosh(0.25*(6*y-3*Ly))
w1 = 1 - 2*sech**2
plt.plot(y,w1)
plt.show()
q = np.zeros([Ns, Ny+1])
q[0] = 1.
q_solver = ETDRK4(Ly,Ny,Ns,h=ds,lbc=lbc,rbc=rbc)
q1, y = q_solver.solve(w1, q[0], q)
plt.plot(y,q1)
plt.show()
def test_etdrk4():
'''
The test case is according to R. C. Daileda Lecture notes.
du/dt = (1/25) u_xx , x@(0,3)
with boundary conditions:
u(0,t) = 0
u_x(3,t) = -(1/2) u(3,t)
u(x,0) = 100*(1-x/3)
Conclusion:
We find that the numerical solution is much more accurate than the five
term approximation of the exact analytical solution.
'''
Nx = 64
Lx = 3
t = 1.
Ns = 101
ds = t/(Ns - 1)
ii = np.arange(Nx+1)
x = np.cos(np.pi * ii / Nx)
x = 0.5 * (x + 1) * Lx
w = np.zeros(Nx+1)
q = np.zeros([Ns, Nx+1])
q[0] = 100*(1-x/3)
q_exact = 47.0449*np.exp(-0.0210*t)*np.sin(0.7249*x) + \
45.1413*np.exp(-0.1113*t)*np.sin(1.6679*x) + \
21.3586*np.exp(-0.2872*t)*np.sin(2.6795*x) + \
19.3403*np.exp(-0.5505*t)*np.sin(3.7098*x) + \
12.9674*np.exp(-0.9015*t)*np.sin(4.7474*x)
lbc = BC(DIRICHLET, [0,1,0])
rbc = BC(ROBIN, [1.,0.5,0])
q_solver = ETDRK4(Lx,Nx,Ns,h=ds,c=1./25,lbc=lbc,rbc=rbc)
q1, x = q_solver.solve(w, q[0], q)
plt.plot(x, q[0], label='q_0')
plt.plot(x, q1, label='q_solution')
plt.plot(x, q_exact, label='q_exact')
plt.legend(loc='best')
plt.show()
def check(u):
'''
The PDE is
du/dt = u_xx + u_yy - wu
Calculate the residual using FD scheme.
R(x) = (u(x+h)
'''
pass
if __name__ == '__main__':
test_etdrk4fxcy()
| false | true |
f71af005c4808726491024543d12346686c50421 | 3,691 | py | Python | travel_time_visualization/server.py | rogerfitz/tutorials | dae6470bad63b71e755caaff0b69893f5c9a1d63 | [
"MIT"
] | 45 | 2017-07-13T23:20:54.000Z | 2022-02-25T16:48:52.000Z | flask_viz_server/server.py | rogerfitz/tutorials | dae6470bad63b71e755caaff0b69893f5c9a1d63 | [
"MIT"
] | 51 | 2017-07-28T13:48:26.000Z | 2021-11-29T06:37:41.000Z | flask_viz_server/server.py | rogerfitz/tutorials | dae6470bad63b71e755caaff0b69893f5c9a1d63 | [
"MIT"
] | 38 | 2017-07-13T15:48:30.000Z | 2022-02-26T04:12:06.000Z | from flask import Flask, jsonify,render_template,request
from config import API_KEY
import datetime
from collections import defaultdict
import requests
import pandas as pd
import sys
import logging
from itertools import repeat
app = Flask(__name__)

# Forward Flask's log records to gunicorn's error logger so application logs
# show up in the gunicorn output when deployed; DEBUG keeps request tracing on.
gunicorn_error_logger = logging.getLogger('gunicorn.error')
app.logger.handlers.extend(gunicorn_error_logger.handlers)
app.logger.setLevel(logging.DEBUG)

# Thread (not process) pool: the fan-out work is I/O-bound HTTP requests.
from multiprocessing.dummy import Pool as ThreadPool
pool = ThreadPool(20)

BASE_URL="https://maps.googleapis.com/maps/api/"

# Sanity log of timestamp decoding at import time.
app.logger.debug(datetime.datetime.fromtimestamp(1498924020))
class GAPIError(Exception):
    """Application-level error raised when a Google API call fails.

    Carries an HTTP-ish status code and an optional payload mapping that is
    merged into the JSON body produced by ``to_dict``.
    """
    # Default status used when the caller does not supply one.
    status_code = 31337

    def __init__(self, message, status_code=None, payload=None):
        super().__init__()
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Return a JSON-serializable dict describing this error."""
        body = dict(self.payload or ())
        body['message'] = self.message
        return body
def makeRequest(url, API_KEY):
    """Append the API key to *url*, issue the GET request, and extract the
    duration-in-traffic value (seconds) from the Distance Matrix response."""
    full_url = "%s&key=%s" % (url, API_KEY)
    payload = requests.get(full_url).json()
    element = payload['rows'][0]['elements'][0]
    return element['duration_in_traffic']['value']
def getDistanceMatrix(origin,destination,mode,departure_time,traffic_model, API_KEY):
    """Query the Google Distance Matrix API for one origin/destination pair.

    ``departure_time`` is a UTC POSIX timestamp (seconds).  Returns the
    predicted travel time in traffic, in seconds.
    """
    endpoint = BASE_URL + "distancematrix/json?"
    query = "origins=%s&destinations=%s&mode=%s&departure_time=%s&traffic_model=%s" % (
        origin, destination, mode, departure_time, traffic_model)
    return makeRequest(endpoint + query, API_KEY)
def getNearest(dt, offset):
    """Round *dt* up to the next multiple of *offset* minutes.

    Timestamps that already sit on an exact multiple are returned unchanged.
    """
    period = datetime.timedelta(minutes=offset)
    elapsed = (dt - datetime.datetime.min) % period
    if not elapsed:
        return dt
    return dt + period - elapsed
def getChartData(starting_address,destination_address, leave_after, hours_to_grab,API_KEY,OFFSET=15):
    """Build c3.js-style column data of predicted travel times.

    Samples the Google Distance Matrix API every ``OFFSET`` minutes for
    ``hours_to_grab`` hours (plus one sample at ``leave_after`` itself),
    for the three traffic models.

    Returns:
        list of c3 columns ``[series_name, v1, v2, ...]``; the ``x`` series
        holds JavaScript timestamps (milliseconds), the model series hold
        travel times in whole minutes.
    """
    # First sample is the requested departure itself; subsequent samples are
    # aligned to OFFSET-minute boundaries.  (Bug fix: this previously passed a
    # hard-coded 15, silently ignoring a caller-supplied OFFSET.)
    start_date = getNearest(leave_after, OFFSET)
    dts = [int(leave_after.timestamp())]
    for dt in (start_date + datetime.timedelta(minutes=offset)
               for offset in range(0, 60 * hours_to_grab, OFFSET)):
        dts.append(int(dt.timestamp()))
    request_times = {}
    for traffic_model in ["best_guess", "pessimistic", "optimistic"]:
        # Fan the per-departure-time requests out over the module-level
        # thread pool; only the departure time varies between calls.
        # NOTE(review): the Distance Matrix API documents modes
        # driving/walking/bicycling/transit -- "car" looks wrong; confirm.
        results = pool.starmap(
            getDistanceMatrix,
            zip(repeat(starting_address), repeat(destination_address),
                repeat("car"), dts, repeat(traffic_model), repeat(API_KEY))
        )
        request_times[traffic_model] = results
    request_times["index"] = dts
    # API returns seconds; convert to minutes.
    travel_times = pd.DataFrame.from_dict(request_times).set_index("index") / 60
    viz_df = travel_times.reset_index()
    viz_df['x'] = viz_df['index'] * 1000  # milliseconds for JS Date
    del viz_df['index']
    viz_json = viz_df.to_dict(orient="list")
    # Convert to c3 columns: [series_name, value, value, ...]
    columns = []
    for col, vals in viz_json.items():
        if col != "x":
            vals = [round(x) for x in vals]
        columns.append([col] + vals)
    return columns
@app.route("/")
def index():
    """Render the landing page, exposing the server's API key to the client."""
    return render_template('index.html', API_KEY=API_KEY)
@app.route('/data')
def data():
    """Return c3 chart columns of predicted travel times as JSON.

    Query parameters:
        leaveAfter         -- departure time as a JS timestamp (milliseconds)
        startingAddress    -- origin passed through to the Distance Matrix API
        destinationAddress -- destination address
        API_KEY            -- optional per-user key; falls back to the server key
    """
    app.logger.debug(request.args)
    leaveAfter = request.args.get("leaveAfter")
    # JS timestamps are milliseconds; fromtimestamp wants seconds.
    leaveAfter = datetime.datetime.fromtimestamp(int(leaveAfter) / 1000)
    USERS_API_KEY = request.args.get("API_KEY", default=API_KEY)
    # Departures in the past are clamped to "now".
    now = datetime.datetime.now()
    if leaveAfter < now:
        leaveAfter = now
    try:
        response = getChartData(request.args.get("startingAddress"),
                                request.args.get("destinationAddress"),
                                leaveAfter, 8, USERS_API_KEY)
    except Exception as exc:
        # Was a bare `except:` that also swallowed SystemExit/KeyboardInterrupt
        # and hid the real cause.  Log it, then map to the client-facing error.
        app.logger.exception("getChartData failed")
        raise GAPIError("API Key no longer valid", status_code=31337) from exc
    return jsonify(response)
@app.errorhandler(GAPIError)
def handle_invalid_usage(error):
    """Serialize an uncaught GAPIError into a JSON response with its status code."""
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response
if __name__ == '__main__':
    # Development entry point: binds on all interfaces, port 5000.
    app.run(host='0.0.0.0', port=5000)
| 35.490385 | 145 | 0.715253 | from flask import Flask, jsonify,render_template,request
from config import API_KEY
import datetime
from collections import defaultdict
import requests
import pandas as pd
import sys
import logging
from itertools import repeat
app = Flask(__name__)
gunicorn_error_logger = logging.getLogger('gunicorn.error')
app.logger.handlers.extend(gunicorn_error_logger.handlers)
app.logger.setLevel(logging.DEBUG)
from multiprocessing.dummy import Pool as ThreadPool
pool = ThreadPool(20)
BASE_URL="https://maps.googleapis.com/maps/api/"
app.logger.debug(datetime.datetime.fromtimestamp(1498924020))
class GAPIError(Exception):
status_code = 31337
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
def makeRequest(url, API_KEY):
url+="&key=%s"%API_KEY
return requests.get(url).json()['rows'][0]['elements'][0]['duration_in_traffic']['value']
def getDistanceMatrix(origin,destination,mode,departure_time,traffic_model, API_KEY):
url=BASE_URL+"distancematrix/json?"
params="origins=%s&destinations=%s&mode=%s&departure_time=%s&traffic_model=%s"%(origin,destination,mode,departure_time,traffic_model)
return makeRequest(url+params, API_KEY)
def getNearest(dt,offset):
return dt + (datetime.datetime.min - dt) % datetime.timedelta(minutes=offset)
def getChartData(starting_address,destination_address, leave_after, hours_to_grab,API_KEY,OFFSET=15):
start_date=getNearest(leave_after,15)
request_times=defaultdict(dict)
dts=[int(leave_after.timestamp())]
for dt in (start_date + datetime.timedelta(minutes=offset) for offset in range(0,60*hours_to_grab,OFFSET)):
dts.append(int(dt.timestamp()))
request_times={}
for traffic_model in ["best_guess","pessimistic","optimistic"]:
results=pool.starmap(
getDistanceMatrix, zip(repeat(starting_address),repeat(destination_address),repeat("car"),dts,repeat(traffic_model), repeat(API_KEY))
)
request_times[traffic_model]=results
request_times["index"]=dts
travel_times=pd.DataFrame.from_dict(request_times).set_index("index")/60
viz_df=travel_times.reset_index()
viz_df['x']=viz_df['index']*1000
del viz_df['index']
viz_json=viz_df.to_dict(orient="list")
columns=[]
for col,vals in viz_json.items():
if col!="x":
vals=[round(x) for x in vals]
columns.append([col]+vals)
return columns
@app.route("/")
def index():
return render_template('index.html', API_KEY=API_KEY)
@app.route('/data')
def data():
app.logger.debug(request.args)
leaveAfter=request.args.get("leaveAfter")
leaveAfter=datetime.datetime.fromtimestamp(int(leaveAfter)/1000)
USERS_API_KEY=request.args.get("API_KEY",default=API_KEY)
now=datetime.datetime.now()
if leaveAfter<now:
leaveAfter=now
try:
response=getChartData(request.args.get("startingAddress"),request.args.get("destinationAddress"),leaveAfter,8, USERS_API_KEY)
return jsonify(response)
except:
raise GAPIError("API Key no longer valid", status_code=31337)
@app.errorhandler(GAPIError)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| true | true |
f71af0636d15e0878c031743b0f73cf871004237 | 19,689 | py | Python | paddlenlp/transformers/bert/tokenizer.py | Leedoo/PaddleNLP | ac3a6165e5eb6d638a4165709fd6cf91c11077c7 | [
"Apache-2.0"
] | 3 | 2021-09-06T11:27:49.000Z | 2021-11-09T08:19:00.000Z | paddlenlp/transformers/bert/tokenizer.py | narrowser/PaddleNLP | fd740cb7a9d83b91116d3ad9cf6b4e3a683481f4 | [
"Apache-2.0"
] | null | null | null | paddlenlp/transformers/bert/tokenizer.py | narrowser/PaddleNLP | fd740cb7a9d83b91116d3ad9cf6b4e3a683481f4 | [
"Apache-2.0"
] | 4 | 2021-08-23T07:46:06.000Z | 2021-09-23T08:37:03.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import io
import json
import os
import six
import unicodedata
from .. import PretrainedTokenizer
from ..tokenizer_utils import convert_to_unicode, whitespace_tokenize, _is_whitespace, _is_control, _is_punctuation
__all__ = ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer']
class BasicTokenizer(object):
    """
    Runs basic tokenization (punctuation splitting, lower casing, etc.).
    Args:
        do_lower_case (bool): Whether to lower-case the text and strip
            accents. If you use the BERT pretrained model, this is set to
            False when using the cased model, otherwise it is set to True.
            Default: True.
    """

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer."""
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """
        Tokenizes a piece of text using basic tokenizer.
        Args:
            text (str): A piece of text.
        Returns:
            list(str): A list of tokens.
        """
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # Surround every CJK character with spaces so Chinese text is split
        # character by character before wordpiece tokenization.
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """
        Strips accents from a piece of text.
        """
        # NFD normalization separates accented characters into the base char
        # plus combining marks (category "Mn"), which are then dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """
        Splits punctuation on a piece of text.
        """
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                # Each punctuation mark becomes its own token.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """
        Adds whitespace around any CJK character.
        """
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """
        Checks whether CP is the codepoint of a CJK character.
        """
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or
                (cp >= 0x3400 and cp <= 0x4DBF) or
                (cp >= 0x20000 and cp <= 0x2A6DF) or
                (cp >= 0x2A700 and cp <= 0x2B73F) or
                (cp >= 0x2B740 and cp <= 0x2B81F) or
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or
                (cp >= 0x2F800 and cp <= 0x2FA1F)):
            return True
        return False

    def _clean_text(self, text):
        """
        Performs invalid character removal and whitespace cleanup on text.
        """
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement character and control chars.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """
    Runs WordPiece tokenization.
    Args:
        vocab (Vocab|dict): Vocab of the word piece tokenizer.
        unk_token (str): A specific token to replace all unknown tokens.
        max_input_chars_per_word (int): If a word's length is more than
            max_input_chars_per_word, it will be treated as an unknown word.
            Default: 100.
    """

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.
        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.
        Returns:
            list (str): A list of wordpiece tokens.
        Example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                # Overly long words are mapped to the unknown token wholesale.
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedily find the longest vocab entry starting at `start`.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        # Continuation pieces carry the "##" prefix.
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No vocab entry matches at this position: the whole word
                    # is emitted as the unknown token.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
class BertTokenizer(PretrainedTokenizer):
    """
    Constructs a BERT tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and follows a WordPiece tokenizer to
    tokenize as subwords.
    Args:
        vocab_file (str): file path of the vocabulary
        do_lower_case (bool): Whether to lower-case the text and strip
            accents. If you use the BERT pretrained model, this is set to
            False when using the cased model, otherwise it is set to True.
            Default: True.
        unk_token (str): The special token for unknown words. Default: "[UNK]".
        sep_token (str): The special token for separator. Default: "[SEP]".
        pad_token (str): The special token for padding. Default: "[PAD]".
        cls_token (str): The special token for cls. Default: "[CLS]".
        mask_token (str): The special token for mask. Default: "[MASK]".
    Examples:
        .. code-block:: python
            from paddle.hapi.text import BertTokenizer
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            # the following line gets: ['he', 'was', 'a', 'puppet', '##eer']
            tokens = tokenizer('He was a puppeteer')
            # the following line gets: 'he was a puppeteer'
            tokenizer.convert_tokens_to_string(tokens)
    """
    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    # Download locations of the vocabulary for each pretrained model name.
    pretrained_resource_files_map = {
        "vocab_file": {
            "bert-base-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-uncased-vocab.txt",
            "bert-large-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-uncased-vocab.txt",
            "bert-base-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-cased-vocab.txt",
            "bert-large-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-cased-vocab.txt",
            "bert-base-multilingual-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-uncased-vocab.txt",
            "bert-base-multilingual-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-cased-vocab.txt",
            "bert-base-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "bert-wwm-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-chinese-vocab.txt",
            "bert-wwm-ext-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-ext-chinese-vocab.txt",
            "macbert-large-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "macbert-base-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "simbert-base-chinese":
            "https://paddlenlp.bj.bcebos.com/models/transformers/simbert/vocab.txt",
        }
    }
    # Per-model constructor defaults; cased models keep the original casing.
    pretrained_init_configuration = {
        "bert-base-uncased": {
            "do_lower_case": True
        },
        "bert-large-uncased": {
            "do_lower_case": True
        },
        "bert-base-cased": {
            "do_lower_case": False
        },
        "bert-large-cased": {
            "do_lower_case": False
        },
        "bert-base-multilingual-uncased": {
            "do_lower_case": True
        },
        "bert-base-multilingual-cased": {
            "do_lower_case": False
        },
        "bert-base-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-ext-chinese": {
            "do_lower_case": False
        },
        "macbert-large-chinese": {
            "do_lower_case": False
        },
        "macbert-base-chinese": {
            "do_lower_case": False
        },
        "simbert-base-chinese":{
            "do_lower_case": True
        },
    }
    padding_side = 'right'

    def __init__(self,
                 vocab_file,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]"):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(
            vocab=self.vocab, unk_token=unk_token)

    @property
    def vocab_size(self):
        """
        Return the size of the vocabulary.
        Returns:
            int: the size of vocabulary.
        """
        return len(self.vocab)

    def _tokenize(self, text):
        """
        End-to-end tokenization for BERT models.
        Args:
            text (str): The text to be tokenized.
        Returns:
            list: A list of string representing converted tokens.
        """
        split_tokens = []
        # Basic tokenization (punctuation/CJK/lower-casing) first, then
        # WordPiece on each resulting token.
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def tokenize(self, text):
        """
        End-to-end tokenization for BERT models.
        Args:
            text (str): The text to be tokenized.
        Returns:
            list: A list of string representing converted tokens.
        """
        return self._tokenize(text)

    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (list of string) in a single string. Since
        the usage of WordPiece introducing `##` to concat subwords, also remove
        `##` when converting.
        Args:
            tokens (list): A list of string representing tokens to be converted.
        Returns:
            str: Converted string from tokens.
        """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.
        Note:
            This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
            inside your training loop.
        Args:
            pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
                number of added tokens in the case of a single sequence if set to False.
        Returns:
            Number of tokens added to sequences
        """
        # Counted by building the special-token template around empty inputs.
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
                                                  if pair else None))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens.
        A BERT sequence has the following format:
        ::
            - single sequence: ``[CLS] X [SEP]``
            - pair of sequences: ``[CLS] A [SEP] B [SEP]``
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of input_id with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        """
        Build offset map from a pair of offset map by concatenating and adding offsets of special tokens.
        A BERT offset_mapping has the following format:
        ::
            - single sequence: ``(0,0) X (0,0)``
            - pair of sequences: ``(0,0) A (0,0) B (0,0)``
        Args:
            offset_mapping_ids_0 (:obj:`List[tuple]`):
                List of char offsets to which the special tokens will be added.
            offset_mapping_ids_1 (:obj:`List[tuple]`, `optional`):
                Optional second list of char offsets for offset mapping pairs.
        Returns:
            :obj:`List[tuple]`: List of char offsets with the appropriate offsets of special tokens.
        """
        # Special tokens ([CLS]/[SEP]) map to the empty (0, 0) span.
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0)
                                              ] + offset_mapping_1 + [(0, 0)]

    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.
        A BERT sequence pair mask has the following format:
        ::
            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |
        If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of token_type_id according to the given sequence(s).
        """
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
                                                          _sep) * [1]

    def get_special_tokens_mask(self,
                                token_ids_0,
                                token_ids_1=None,
                                already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``encode`` methods.
        Args:
            token_ids_0 (List[int]): List of ids of the first sequence.
            token_ids_1 (List[int], optional): List of ids of the second sequence.
            already_has_special_tokens (bool, optional): Whether or not the token list is already
                formatted with special tokens for the model. Defaults to None.
        Returns:
            results (List[int]): The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
                    token_ids_0))
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + (
                [0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
| 37.863462 | 121 | 0.565646 |
import copy
import io
import json
import os
import six
import unicodedata
from .. import PretrainedTokenizer
from ..tokenizer_utils import convert_to_unicode, whitespace_tokenize, _is_whitespace, _is_control, _is_punctuation
__all__ = ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer']
class BasicTokenizer(object):
    """Runs basic (pre-WordPiece) tokenization: control-char cleaning,
    CJK-character isolation, whitespace splitting, punctuation splitting,
    and optional lower-casing with accent stripping.
    """
    def __init__(self, do_lower_case=True):
        # do_lower_case: when True, tokens are lower-cased and accents removed.
        self.do_lower_case = do_lower_case
    def tokenize(self, text):
        """Split raw ``text`` into a list of basic tokens (strings)."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # Surround each CJK ideograph with spaces so it becomes its own token.
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        # Re-join and re-split to normalize any whitespace introduced above.
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
    def _run_strip_accents(self, text):
        """Remove accents by NFD-decomposing and dropping combining marks
        (Unicode category ``Mn``)."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _run_split_on_punc(self, text):
        """Split ``text`` so that every punctuation character is emitted as
        its own single-character token."""
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                # Punctuation stands alone; the following char starts a new word.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
    def _tokenize_chinese_chars(self, text):
        """Add whitespace around every CJK character in ``text``."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
    def _is_chinese_char(self, cp):
        """Return True if codepoint ``cp`` falls in a CJK ideograph block
        (unified ideographs, extensions, and compatibility ideographs)."""
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or
                (cp >= 0x3400 and cp <= 0x4DBF) or
                (cp >= 0x20000 and cp <= 0x2A6DF) or
                (cp >= 0x2A700 and cp <= 0x2B73F) or
                (cp >= 0x2B740 and cp <= 0x2B81F) or
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or
                (cp >= 0x2F800 and cp <= 0x2FA1F)):
            return True
        return False
    def _clean_text(self, text):
        """Drop NUL/replacement/control characters and map all whitespace
        characters to a single space."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenizer over a fixed vocab."""
    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        # vocab: mapping (or set-like) supporting ``token in vocab``.
        # unk_token: token emitted for words that cannot be decomposed.
        # max_input_chars_per_word: words longer than this map to unk_token.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, text):
        """Split ``text`` (already basic-tokenized) into WordPiece sub-tokens.

        Each whitespace-separated word is decomposed greedily: at every
        position the longest vocab entry is taken (non-initial pieces get a
        ``##`` prefix). If any position has no match, the whole word becomes
        ``unk_token``.
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                # Shrink the candidate from the right until it is in vocab.
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No piece matched at this position: give up on the word.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
class BertTokenizer(PretrainedTokenizer):
    """BERT tokenizer: :class:`BasicTokenizer` (cleaning, punctuation/CJK
    splitting, optional lower-casing) followed by :class:`WordpieceTokenizer`
    against a vocabulary file.
    """
    # Name of the vocabulary resource inside a saved tokenizer directory.
    resource_files_names = {"vocab_file": "vocab.txt"}
    # Download URL of the vocabulary file for each supported checkpoint.
    pretrained_resource_files_map = {
        "vocab_file": {
            "bert-base-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-uncased-vocab.txt",
            "bert-large-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-uncased-vocab.txt",
            "bert-base-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-cased-vocab.txt",
            "bert-large-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-cased-vocab.txt",
            "bert-base-multilingual-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-uncased-vocab.txt",
            "bert-base-multilingual-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-cased-vocab.txt",
            "bert-base-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "bert-wwm-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-chinese-vocab.txt",
            "bert-wwm-ext-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-ext-chinese-vocab.txt",
            "macbert-large-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "macbert-base-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "simbert-base-chinese":
            "https://paddlenlp.bj.bcebos.com/models/transformers/simbert/vocab.txt",
        }
    }
    # Default constructor kwargs per checkpoint (cased vs. uncased models).
    pretrained_init_configuration = {
        "bert-base-uncased": {
            "do_lower_case": True
        },
        "bert-large-uncased": {
            "do_lower_case": True
        },
        "bert-base-cased": {
            "do_lower_case": False
        },
        "bert-large-cased": {
            "do_lower_case": False
        },
        "bert-base-multilingual-uncased": {
            "do_lower_case": True
        },
        "bert-base-multilingual-cased": {
            "do_lower_case": False
        },
        "bert-base-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-ext-chinese": {
            "do_lower_case": False
        },
        "macbert-large-chinese": {
            "do_lower_case": False
        },
        "macbert-base-chinese": {
            "do_lower_case": False
        },
        "simbert-base-chinese":{
            "do_lower_case": True
        },
    }
    # BERT pads sequences on the right.
    padding_side = 'right'
    def __init__(self,
                 vocab_file,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]"):
        """Load the vocab from ``vocab_file`` and build the two sub-tokenizers.

        Raises ``ValueError`` if ``vocab_file`` does not exist.
        """
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(
            vocab=self.vocab, unk_token=unk_token)
    @property
    def vocab_size(self):
        """Number of entries in the vocabulary."""
        return len(self.vocab)
    def _tokenize(self, text):
        """Basic-tokenize then WordPiece-tokenize ``text``."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens
    def tokenize(self, text):
        """Public entry point; see :meth:`_tokenize`."""
        return self._tokenize(text)
    def convert_tokens_to_string(self, tokens):
        """Join WordPiece tokens back into text (strips ``##`` markers)."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string
    def num_special_tokens_to_add(self, pair=False):
        """Number of special tokens added for a single sequence (``pair`` is
        False) or a sequence pair (``pair`` is True)."""
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
                                                  if pair else None))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build BERT inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        """Offset mapping matching :meth:`build_inputs_with_special_tokens`;
        special tokens get the placeholder offset ``(0, 0)``."""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0)
                                              ] + offset_mapping_1 + [(0, 0)]
    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]`` (if any)."""
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
                                                          _sep) * [1]
    def get_special_tokens_mask(self,
                                token_ids_0,
                                token_ids_1=None,
                                already_has_special_tokens=False):
        """Mask with 1 at special-token positions, 0 at sequence tokens.

        When ``already_has_special_tokens`` is True, ``token_ids_0`` must be a
        single, fully formatted sequence and the mask is derived by matching
        ids against ``sep_token_id``/``cls_token_id``.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
                    token_ids_0))
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + (
                [0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
| true | true |
f71af1cd54d5851bd4030703d9d6a0bb37011f59 | 8,309 | py | Python | src/mission_node/src/intersection_detector.py | mommy79/AuDi-GIT-turtlebot3_autorace | fd1382246f1ee74ee70857006563184d672a6666 | [
"Apache-2.0"
] | 1 | 2021-06-13T06:20:15.000Z | 2021-06-13T06:20:15.000Z | src/mission_node/src/intersection_detector.py | taening/AuDi-GIT-turtlebot3_autorace | fd1382246f1ee74ee70857006563184d672a6666 | [
"Apache-2.0"
] | null | null | null | src/mission_node/src/intersection_detector.py | taening/AuDi-GIT-turtlebot3_autorace | fd1382246f1ee74ee70857006563184d672a6666 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import math
class IntersectionDetector:
    """Vision helpers for an autonomous-driving intersection mission:
    detects the wide white stop line, the lane 'exit' line, and blue
    left/right direction signs. All methods return debug images alongside
    their detection results.
    """
    def __init__(self):
        # HSV range used to segment blue direction signs.
        self.lower_blue = np.array([85, 90, 120], np.uint8)
        self.upper_blue = np.array([115, 255, 255], np.uint8)
    def fn_find_intersection_line(self, img_trans):
        """Detect the horizontal intersection (stop) line in the top half of
        the perspective-transformed image.

        Returns ``(intersection_check, img_debug)``.
        """
        # Image cropped to the ROI region (top half of the bird's-eye view).
        pers_height, pers_width = img_trans.shape[:2]  # shape is w384 x h240
        img_gray = cv2.cvtColor(img_trans[:int(pers_height * 1/ 2), :].copy(), cv2.COLOR_RGB2GRAY)
        _, img_intersection = cv2.threshold(img_gray, 180, 255, 0)
        # Open then close to remove speckle noise and fill small gaps.
        img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
        img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))
        img_debug = cv2.merge((img_intersection, img_intersection, img_intersection)).copy()
        _, list_intersection_contour, _ = cv2.findContours(img_intersection, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        intersection_check = False
        for intersection_contour in list_intersection_contour:
            cv2.drawContours(img_debug, [intersection_contour], 0, (0, 0, 255), 2)
            x_stop, y_stop, w_stop, h_stop = cv2.boundingRect(intersection_contour)
            cv2.putText(img_debug, 'w: {}, h: {}'.format(w_stop, h_stop), (intersection_contour[0][0][0]+10, intersection_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))
            # A contour spanning almost the full 384px width is the stop line.
            if 330 < w_stop:
                cv2.drawContours(img_debug, [intersection_contour], 0, (0, 255, 0), 2)
                intersection_check = True
        return intersection_check, img_debug
    def fn_find_exit_line(self, img_trans, direction='left'):
        """Detect a long vertical lane line marking the intersection exit on
        the side opposite to ``direction``.

        Returns ``(exit_check, exit_pos, img_debug)``.
        """
        # Image cropped to the ROI region (half-width opposite the turn side).
        pers_height, pers_width = img_trans.shape[:2]  # shape is w384 x h240
        if direction == 'left':
            img_gray = cv2.cvtColor(img_trans[:, int(pers_width * 1/ 2):].copy(), cv2.COLOR_RGB2GRAY)
        else:
            img_gray = cv2.cvtColor(img_trans[:, :int(pers_width * 1/ 2)].copy(), cv2.COLOR_RGB2GRAY)
        _, img_exit = cv2.threshold(img_gray, 190, 255, 0)
        img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
        img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))
        img_debug = cv2.merge((img_exit, img_exit, img_exit)).copy()
        _, list_exit_contour, _ = cv2.findContours(img_exit, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        exit_check = False
        exit_pos = (0, 0)
        for exit_contour in list_exit_contour:
            cv2.drawContours(img_debug, [exit_contour], 0, (0, 0, 255), 2)
            x_exit, y_exit, w_exit, h_exit = cv2.boundingRect(exit_contour)
            bottom_most_pos = tuple(exit_contour[exit_contour[:, :, 1].argmax()][0])
            val_height = h_exit
            # Scan upward from the bottom for the lowest lit pixel in this
            # column to estimate the visible line length.
            for pos_y in range(pers_height-1, 0, -1):
                if img_gray[pos_y, bottom_most_pos[0]] != 0:
                    val_height = pos_y
                    break
            cv2.putText(img_debug, 'w: {}, h: {}, length: {}'.format(w_exit, h_exit, val_height), (exit_contour[0][0][0]+10, exit_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))
            # Accept a contour that covers most of the observed length and at
            # least half of the image height.
            if h_exit > val_height * 4/5 and h_exit > pers_height/2:
                cv2.drawContours(img_debug, [exit_contour], 0, (0, 255, 0), 2)
                exit_pos = exit_contour[0][0]
                exit_check = True
        return exit_check, exit_pos, img_debug
    def fn_find_direction_sign(self, img_ori):
        """Detect blue left/right arrow signs in the top half of the camera
        image.

        Returns ``(left_sign_detect, right_sign_detect, img_debug)`` where the
        debug image stacks the annotated ROI over the blue-masked ROI.
        """
        left_sign_detect = False
        right_sign_detect = False
        img_height, img_width = img_ori.shape[:2]
        img_roi = img_ori[:int(img_height*1 / 2), :].copy()
        img_hsv = cv2.cvtColor(img_roi, cv2.COLOR_BGR2HSV)
        # HSV filter - Blue color
        img_mask_b = cv2.inRange(img_hsv, self.lower_blue, self.upper_blue)
        img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_OPEN, np.ones((7, 7), np.uint8))
        img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
        #_, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        _, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        img_blue = cv2.bitwise_and(img_roi, img_roi, mask=img_mask_b)
        img_debug = img_roi.copy()
        list_obj = []
        # First pass: keep blue blobs whose size/shape plausibly match a sign.
        for obj_contour in list_obj_contour:
            #cv2.drawContours(img_blue, [contour], 0, (0, 0, 255), 2)
            x, y, w, h = cv2.boundingRect(obj_contour)
            area = cv2.contourArea(obj_contour)
            aspect_ratio = float(w) / h
            area_ratio = float(area) / (w*h)
            cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.putText(img_debug, 'w: {}, h: {}, aspect_ratio: {:.2f}, area_ratio: {:.2f}'.format(w, h, aspect_ratio, area_ratio), (x+10, y+10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 127, 0))
            if (50 < w < 150) and (50 < h < 150) and (0.8 < aspect_ratio < 2.5) and (area_ratio > 0.5):
                cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 255, 255), 2)
                list_obj.append((img_roi[y:y+h, x:x+w].copy(), (x, y, w, h)))
        # Second pass: inside each candidate sign, find the white arrow and
        # infer direction from where its lowest point sits relative to center.
        for (img_obj, (obj_x, obj_y, obj_w, obj_h)) in list_obj:
            img_obj_gray = cv2.cvtColor(img_obj, cv2.COLOR_BGR2GRAY)
            _, img_obj_binary = cv2.threshold(img_obj_gray, 180, 255, cv2.THRESH_BINARY)
            img_obj_binary = cv2.morphologyEx(img_obj_binary, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
            _, list_arrow_contour, _ = cv2.findContours(img_obj_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            obj_x_mid = int(obj_w / 2)
            obj_y_mid = int(obj_h / 2)
            min_val_dis = 30
            bottom_most_pos = None
            for arrow_contour in list_arrow_contour:
                mask_arrow = np.zeros(img_obj_gray.shape, np.uint8)
                cv2.drawContours(mask_arrow, [arrow_contour], 0, 255, -1)
                arrow_x, arrow_y, arrow_w, arrow_h = cv2.boundingRect(arrow_contour)
                cv2.rectangle(img_debug, (obj_x + arrow_x, obj_y + arrow_y), (obj_x + arrow_x + arrow_w, arrow_y + obj_y + arrow_h), (255, 255, 0), 1)
                arrow_area = cv2.contourArea(arrow_contour)
                arrow_aspect_ratio = float(arrow_w) / arrow_h
                arrow_area_ratio = float(arrow_area) / (arrow_w * arrow_h)
                arrow_x_mid = int(arrow_x + arrow_w / 2)
                arrow_y_mid = int(arrow_y + arrow_h / 2)
                if (0.4 * obj_w < arrow_w) and (0.4 * obj_h < arrow_h) and (0.5 < arrow_aspect_ratio < 2) and (arrow_area_ratio > 0.3):
                    # Prefer the candidate closest to the sign's center.
                    val_dis = math.sqrt((arrow_x_mid - obj_x_mid) ** 2 + (arrow_y_mid - obj_y_mid) ** 2)
                    if val_dis < min_val_dis:
                        min_val_dis = val_dis
                        #left_most_pos = tuple(obj_contour[obj_contour[:, :, 0].argmin()][0])
                        #right_most_pos = tuple(obj_contour[obj_contour[:, :, 0].argmax()][0])
                        #top_most_pos = tuple(obj_contour[obj_contour[:, :, 1].argmin()][0])
                        bottom_most_pos = tuple(arrow_contour[arrow_contour[:, :, 1].argmax()][0])
            if bottom_most_pos is not None:
                cv2.circle(img_debug, (obj_x + bottom_most_pos[0], obj_y + bottom_most_pos[1]), 4, (0, 0, 255), -1)
                cv2.line(img_debug, (obj_x + obj_x_mid, obj_y), (obj_x + obj_x_mid, obj_y + obj_h), (255, 0, 255), 2)
                # Arrow tail right of center -> arrow head points left.
                if bottom_most_pos[0] > obj_x_mid:
                    left_sign_detect = True
                    cv2.putText(img_debug, 'LEFT', (obj_x+10, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (255, 0, 0), 2)
                else:
                    right_sign_detect = True
                    cv2.putText(img_debug, 'RIGHT', (obj_x+3, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (0, 255, 0), 2)
        return left_sign_detect, right_sign_detect, np.vstack((img_debug, img_blue))
| 54.664474 | 197 | 0.603562 |
import numpy as np
import cv2
import math
class IntersectionDetector:
    """Vision helpers for the intersection mission (comment-stripped copy):
    stop-line detection, exit-line detection, and blue direction-sign
    recognition. Each method also returns a debug visualization.
    """
    def __init__(self):
        # HSV range used to segment blue direction signs.
        self.lower_blue = np.array([85, 90, 120], np.uint8)
        self.upper_blue = np.array([115, 255, 255], np.uint8)
    def fn_find_intersection_line(self, img_trans):
        """Detect the wide white stop line in the top half of the
        perspective-transformed image; returns ``(found, img_debug)``."""
        pers_height, pers_width = img_trans.shape[:2]
        img_gray = cv2.cvtColor(img_trans[:int(pers_height * 1/ 2), :].copy(), cv2.COLOR_RGB2GRAY)
        _, img_intersection = cv2.threshold(img_gray, 180, 255, 0)
        img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
        img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))
        img_debug = cv2.merge((img_intersection, img_intersection, img_intersection)).copy()
        _, list_intersection_contour, _ = cv2.findContours(img_intersection, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        intersection_check = False
        for intersection_contour in list_intersection_contour:
            cv2.drawContours(img_debug, [intersection_contour], 0, (0, 0, 255), 2)
            x_stop, y_stop, w_stop, h_stop = cv2.boundingRect(intersection_contour)
            cv2.putText(img_debug, 'w: {}, h: {}'.format(w_stop, h_stop), (intersection_contour[0][0][0]+10, intersection_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))
            # Nearly full-width contour -> stop line.
            if 330 < w_stop:
                cv2.drawContours(img_debug, [intersection_contour], 0, (0, 255, 0), 2)
                intersection_check = True
        return intersection_check, img_debug
    def fn_find_exit_line(self, img_trans, direction='left'):
        """Detect a long vertical exit line on the side opposite the turn;
        returns ``(found, position, img_debug)``."""
        pers_height, pers_width = img_trans.shape[:2]
        if direction == 'left':
            img_gray = cv2.cvtColor(img_trans[:, int(pers_width * 1/ 2):].copy(), cv2.COLOR_RGB2GRAY)
        else:
            img_gray = cv2.cvtColor(img_trans[:, :int(pers_width * 1/ 2)].copy(), cv2.COLOR_RGB2GRAY)
        _, img_exit = cv2.threshold(img_gray, 190, 255, 0)
        img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
        img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))
        img_debug = cv2.merge((img_exit, img_exit, img_exit)).copy()
        _, list_exit_contour, _ = cv2.findContours(img_exit, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        exit_check = False
        exit_pos = (0, 0)
        for exit_contour in list_exit_contour:
            cv2.drawContours(img_debug, [exit_contour], 0, (0, 0, 255), 2)
            x_exit, y_exit, w_exit, h_exit = cv2.boundingRect(exit_contour)
            bottom_most_pos = tuple(exit_contour[exit_contour[:, :, 1].argmax()][0])
            val_height = h_exit
            # Estimate visible line length by scanning the column upward.
            for pos_y in range(pers_height-1, 0, -1):
                if img_gray[pos_y, bottom_most_pos[0]] != 0:
                    val_height = pos_y
                    break
            cv2.putText(img_debug, 'w: {}, h: {}, length: {}'.format(w_exit, h_exit, val_height), (exit_contour[0][0][0]+10, exit_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))
            if h_exit > val_height * 4/5 and h_exit > pers_height/2:
                cv2.drawContours(img_debug, [exit_contour], 0, (0, 255, 0), 2)
                exit_pos = exit_contour[0][0]
                exit_check = True
        return exit_check, exit_pos, img_debug
    def fn_find_direction_sign(self, img_ori):
        """Detect blue left/right arrow signs in the top half of the frame;
        returns ``(left_detected, right_detected, stacked_debug_image)``."""
        left_sign_detect = False
        right_sign_detect = False
        img_height, img_width = img_ori.shape[:2]
        img_roi = img_ori[:int(img_height*1 / 2), :].copy()
        img_hsv = cv2.cvtColor(img_roi, cv2.COLOR_BGR2HSV)
        img_mask_b = cv2.inRange(img_hsv, self.lower_blue, self.upper_blue)
        img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_OPEN, np.ones((7, 7), np.uint8))
        img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
        _, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        img_blue = cv2.bitwise_and(img_roi, img_roi, mask=img_mask_b)
        img_debug = img_roi.copy()
        list_obj = []
        # Keep blue blobs whose size and shape plausibly match a sign board.
        for obj_contour in list_obj_contour:
            x, y, w, h = cv2.boundingRect(obj_contour)
            area = cv2.contourArea(obj_contour)
            aspect_ratio = float(w) / h
            area_ratio = float(area) / (w*h)
            cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.putText(img_debug, 'w: {}, h: {}, aspect_ratio: {:.2f}, area_ratio: {:.2f}'.format(w, h, aspect_ratio, area_ratio), (x+10, y+10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 127, 0))
            if (50 < w < 150) and (50 < h < 150) and (0.8 < aspect_ratio < 2.5) and (area_ratio > 0.5):
                cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 255, 255), 2)
                list_obj.append((img_roi[y:y+h, x:x+w].copy(), (x, y, w, h)))
        # Inside each candidate sign, locate the white arrow and infer the
        # direction from its lowest point relative to the sign's center line.
        for (img_obj, (obj_x, obj_y, obj_w, obj_h)) in list_obj:
            img_obj_gray = cv2.cvtColor(img_obj, cv2.COLOR_BGR2GRAY)
            _, img_obj_binary = cv2.threshold(img_obj_gray, 180, 255, cv2.THRESH_BINARY)
            img_obj_binary = cv2.morphologyEx(img_obj_binary, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
            _, list_arrow_contour, _ = cv2.findContours(img_obj_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            obj_x_mid = int(obj_w / 2)
            obj_y_mid = int(obj_h / 2)
            min_val_dis = 30
            bottom_most_pos = None
            for arrow_contour in list_arrow_contour:
                mask_arrow = np.zeros(img_obj_gray.shape, np.uint8)
                cv2.drawContours(mask_arrow, [arrow_contour], 0, 255, -1)
                arrow_x, arrow_y, arrow_w, arrow_h = cv2.boundingRect(arrow_contour)
                cv2.rectangle(img_debug, (obj_x + arrow_x, obj_y + arrow_y), (obj_x + arrow_x + arrow_w, arrow_y + obj_y + arrow_h), (255, 255, 0), 1)
                arrow_area = cv2.contourArea(arrow_contour)
                arrow_aspect_ratio = float(arrow_w) / arrow_h
                arrow_area_ratio = float(arrow_area) / (arrow_w * arrow_h)
                arrow_x_mid = int(arrow_x + arrow_w / 2)
                arrow_y_mid = int(arrow_y + arrow_h / 2)
                if (0.4 * obj_w < arrow_w) and (0.4 * obj_h < arrow_h) and (0.5 < arrow_aspect_ratio < 2) and (arrow_area_ratio > 0.3):
                    # Prefer the candidate arrow closest to the sign center.
                    val_dis = math.sqrt((arrow_x_mid - obj_x_mid) ** 2 + (arrow_y_mid - obj_y_mid) ** 2)
                    if val_dis < min_val_dis:
                        min_val_dis = val_dis
                        bottom_most_pos = tuple(arrow_contour[arrow_contour[:, :, 1].argmax()][0])
            if bottom_most_pos is not None:
                cv2.circle(img_debug, (obj_x + bottom_most_pos[0], obj_y + bottom_most_pos[1]), 4, (0, 0, 255), -1)
                cv2.line(img_debug, (obj_x + obj_x_mid, obj_y), (obj_x + obj_x_mid, obj_y + obj_h), (255, 0, 255), 2)
                # Tail right of center -> arrow head points left.
                if bottom_most_pos[0] > obj_x_mid:
                    left_sign_detect = True
                    cv2.putText(img_debug, 'LEFT', (obj_x+10, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (255, 0, 0), 2)
                else:
                    right_sign_detect = True
                    cv2.putText(img_debug, 'RIGHT', (obj_x+3, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (0, 255, 0), 2)
        return left_sign_detect, right_sign_detect, np.vstack((img_debug, img_blue))
| true | true |
f71af1f582d2aaa6d4011db4c3ff8c3821e87e34 | 32,935 | py | Python | numpyro/distributions/transforms.py | ahoho/numpyro | 64e94e346c51a6c0c1ba51aa7b608e73513f158f | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/transforms.py | ahoho/numpyro | 64e94e346c51a6c0c1ba51aa7b608e73513f158f | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/transforms.py | ahoho/numpyro | 64e94e346c51a6c0c1ba51aa7b608e73513f158f | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import math
import warnings
import weakref
import numpy as np
from jax import lax, ops, tree_flatten, tree_map, vmap
from jax.flatten_util import ravel_pytree
from jax.nn import softplus
import jax.numpy as jnp
from jax.scipy.linalg import solve_triangular
from jax.scipy.special import expit, logit
from numpyro.distributions import constraints
from numpyro.distributions.util import matrix_to_tril_vec, signed_stick_breaking_tril, sum_rightmost, vec_to_tril_matrix
from numpyro.util import not_jax_tracer
__all__ = [
'biject_to',
'AbsTransform',
'AffineTransform',
'CholeskyTransform',
'ComposeTransform',
'CorrCholeskyTransform',
'CorrMatrixCholeskyTransform',
'ExpTransform',
'SoftplusTransform',
'IdentityTransform',
'InvCholeskyTransform',
'LowerCholeskyTransform',
'LowerCholeskyAffine',
'PermuteTransform',
'PowerTransform',
'SigmoidTransform',
'SoftplusTransform',
'SoftplusLowerCholeskyTransform',
'StickBreakingTransform',
'Transform',
'UnpackTransform',
]
def _clipped_expit(x):
    """Sigmoid of ``x`` clipped strictly inside ``(0, 1)`` so downstream
    ``log``/``logit`` calls stay finite at extreme inputs."""
    dtype_info = jnp.finfo(jnp.result_type(x))
    lower, upper = dtype_info.tiny, 1. - dtype_info.eps
    return jnp.clip(expit(x), a_min=lower, a_max=upper)
class Transform(object):
    """Abstract base class for invertible transforms with computable
    log-abs-det-Jacobians. Subclasses implement ``__call__``, ``_inverse``
    and ``log_abs_det_jacobian``.
    """
    domain = constraints.real
    codomain = constraints.real
    # Weak reference to a cached _InverseTransform (see ``inv``).
    _inv = None
    @property
    def event_dim(self):
        warnings.warn("transform.event_dim is deprecated. Please use Transform.domain.event_dim to "
                      "get input event dim or Transform.codomain.event_dim to get output event dim.",
                      FutureWarning)
        return self.domain.event_dim
    @property
    def inv(self):
        """Inverse of this transform, cached via a weak reference so that
        ``t.inv.inv is t`` without creating a reference cycle."""
        inv = None
        if self._inv is not None:
            inv = self._inv()
        if inv is None:
            inv = _InverseTransform(self)
            self._inv = weakref.ref(inv)
        return inv
    def __call__(self, x):
        # Fixed: the previous implementation *returned* the
        # NotImplementedError class instead of raising it, silently handing
        # callers an exception type as the "transformed" value.
        raise NotImplementedError
    def _inverse(self, y):
        raise NotImplementedError
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        raise NotImplementedError
    def call_with_intermediates(self, x):
        """Return ``(self(x), intermediates)``; subclasses may stash values
        that make ``log_abs_det_jacobian`` cheaper to compute."""
        return self(x), None
    def forward_shape(self, shape):
        """
        Infers the shape of the forward computation, given the input shape.
        Defaults to preserving shape.
        """
        return shape
    def inverse_shape(self, shape):
        """
        Infers the shapes of the inverse computation, given the output shape.
        Defaults to preserving shape.
        """
        return shape
class _InverseTransform(Transform):
    """Lazy inverse of another transform; created and cached by
    :attr:`Transform.inv`. Delegates everything to the wrapped transform
    with domain/codomain swapped.
    """
    def __init__(self, transform):
        super().__init__()
        # Reuse the ``_inv`` slot to hold the forward transform, so that
        # ``self.inv`` returns it directly.
        self._inv = transform
    @property
    def domain(self):
        return self._inv.codomain
    @property
    def codomain(self):
        return self._inv.domain
    @property
    def inv(self):
        return self._inv
    def __call__(self, x):
        # Forward of the inverse == inverse of the forward.
        return self._inv._inverse(x)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # NB: we don't use intermediates for inverse transform
        return -self._inv.log_abs_det_jacobian(y, x, None)
    def forward_shape(self, shape):
        return self._inv.inverse_shape(shape)
    def inverse_shape(self, shape):
        return self._inv.forward_shape(shape)
class AbsTransform(Transform):
    """Maps ``x -> |x|``. Not injective: the nonnegative preimage is used
    as the conventional inverse."""
    domain = constraints.real
    codomain = constraints.positive
    def __eq__(self, other):
        return isinstance(other, AbsTransform)
    def __call__(self, x):
        # jnp.absolute is the canonical name for jnp.abs.
        return jnp.absolute(x)
    def _inverse(self, y):
        # y is already the nonnegative representative of {-y, y}.
        return y
class AffineTransform(Transform):
    """
    Maps ``x -> loc + scale * x``.

    .. note:: When `scale` is a JAX tracer, we always assume that `scale > 0`
        when calculating `codomain`.
    """
    def __init__(self, loc, scale, domain=constraints.real):
        self.loc = loc
        self.scale = scale
        self.domain = domain
    @property
    def codomain(self):
        # The image of the domain under x -> loc + scale * x; when scale is a
        # concrete negative array the interval endpoints flip.
        if self.domain is constraints.real:
            return constraints.real
        elif isinstance(self.domain, constraints.greater_than):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.less_than(self(self.domain.lower_bound))
            # we suppose scale > 0 for any tracer
            else:
                return constraints.greater_than(self(self.domain.lower_bound))
        elif isinstance(self.domain, constraints.less_than):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.greater_than(self(self.domain.upper_bound))
            # we suppose scale > 0 for any tracer
            else:
                return constraints.less_than(self(self.domain.upper_bound))
        elif isinstance(self.domain, constraints.interval):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.interval(self(self.domain.upper_bound),
                                            self(self.domain.lower_bound))
            else:
                return constraints.interval(self(self.domain.lower_bound),
                                            self(self.domain.upper_bound))
        else:
            raise NotImplementedError
    def __call__(self, x):
        return self.loc + self.scale * x
    def _inverse(self, y):
        return (y - self.loc) / self.scale
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # |dy/dx| = |scale|, broadcast to the batch shape of x.
        return jnp.broadcast_to(jnp.log(jnp.abs(self.scale)), jnp.shape(x))
    def forward_shape(self, shape):
        # Output shape is the broadcast of input, loc, and scale shapes.
        return lax.broadcast_shapes(shape,
                                    getattr(self.loc, "shape", ()),
                                    getattr(self.scale, "shape", ()))
    def inverse_shape(self, shape):
        return lax.broadcast_shapes(shape,
                                    getattr(self.loc, "shape", ()),
                                    getattr(self.scale, "shape", ()))
def _get_compose_transform_input_event_dim(parts):
    """Event dim required of the *input* of a composition of ``parts``.

    Walks backwards from the last transform. The initial value already
    accounts for ``parts[-1]``, so the loop starts at ``parts[-2]`` —
    mirroring the output-dim helper, which starts its loop at ``parts[1]``.
    The previous slice ``parts[len(parts) - 1::-1]`` re-processed the final
    transform, inflating the result whenever that transform's domain
    event_dim exceeded its codomain event_dim.
    """
    input_event_dim = parts[-1].domain.event_dim
    for part in parts[-2::-1]:
        input_event_dim = part.domain.event_dim + max(input_event_dim - part.codomain.event_dim, 0)
    return input_event_dim
def _get_compose_transform_output_event_dim(parts):
    """Event dim of the *output* produced by composing ``parts`` in order."""
    event_dim = parts[0].codomain.event_dim
    for transform in parts[1:]:
        # Batch dims the transform does not consume propagate through.
        carried = max(event_dim - transform.domain.event_dim, 0)
        event_dim = transform.codomain.event_dim + carried
    return event_dim
class ComposeTransform(Transform):
    """Composition of a sequence of transforms, applied left-to-right.

    Event-dimension accounting: a part with codomain event_dim larger than
    the next part's domain event_dim contributes extra batch dims, which are
    tracked when summing per-part Jacobian terms.
    """
    def __init__(self, parts):
        # parts: list of Transform instances, applied in order.
        self.parts = parts
    @property
    def domain(self):
        input_event_dim = _get_compose_transform_input_event_dim(self.parts)
        first_input_event_dim = self.parts[0].domain.event_dim
        assert input_event_dim >= first_input_event_dim
        if input_event_dim == first_input_event_dim:
            return self.parts[0].domain
        else:
            # Promote to an independent constraint over the extra dims.
            return constraints.independent(self.parts[0].domain, input_event_dim - first_input_event_dim)
    @property
    def codomain(self):
        output_event_dim = _get_compose_transform_output_event_dim(self.parts)
        last_output_event_dim = self.parts[-1].codomain.event_dim
        assert output_event_dim >= last_output_event_dim
        if output_event_dim == last_output_event_dim:
            return self.parts[-1].codomain
        else:
            return constraints.independent(self.parts[-1].codomain, output_event_dim - last_output_event_dim)
    def __call__(self, x):
        for part in self.parts:
            x = part(x)
        return x
    def _inverse(self, y):
        # Apply inverses in reverse order.
        for part in self.parts[::-1]:
            y = part.inv(y)
        return y
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        """Sum of per-part log-dets; ``intermediates`` (from
        :meth:`call_with_intermediates`) avoids recomputing forward passes."""
        if intermediates is not None:
            if len(intermediates) != len(self.parts):
                raise ValueError('Intermediates array has length = {}. Expected = {}.'
                                 .format(len(intermediates), len(self.parts)))
        result = 0.
        input_event_dim = self.domain.event_dim
        for i, part in enumerate(self.parts[:-1]):
            y_tmp = part(x) if intermediates is None else intermediates[i][0]
            inter = None if intermediates is None else intermediates[i][1]
            logdet = part.log_abs_det_jacobian(x, y_tmp, intermediates=inter)
            # Dims beyond the part's event dims are batch dims whose logdet
            # contributions must be summed out.
            batch_ndim = input_event_dim - part.domain.event_dim
            result = result + sum_rightmost(logdet, batch_ndim)
            input_event_dim = part.codomain.event_dim + batch_ndim
            x = y_tmp
        # account for the last transform, where y is available
        inter = None if intermediates is None else intermediates[-1]
        part = self.parts[-1]
        logdet = part.log_abs_det_jacobian(x, y, intermediates=inter)
        result = result + sum_rightmost(logdet, input_event_dim - part.domain.event_dim)
        return result
    def call_with_intermediates(self, x):
        intermediates = []
        for part in self.parts[:-1]:
            x, inter = part.call_with_intermediates(x)
            intermediates.append([x, inter])
        # NB: we don't need to hold the last output value in `intermediates`
        x, inter = self.parts[-1].call_with_intermediates(x)
        intermediates.append(inter)
        return x, intermediates
    def forward_shape(self, shape):
        for part in self.parts:
            shape = part.forward_shape(shape)
        return shape
    def inverse_shape(self, shape):
        for part in reversed(self.parts):
            shape = part.inverse_shape(shape)
        return shape
def _matrix_forward_shape(shape, offset=0):
    """Map a batch shape ``(..., N)`` of flattened lower-triangular vectors
    to the matrix shape ``(..., D, D)``, where ``N = D * (D + 1) / 2`` and
    ``offset`` shifts D (e.g. ``offset=-1`` for strictly-lower storage)."""
    if not shape:
        raise ValueError("Too few dimensions in input")
    num_entries = shape[-1]
    # Invert N = D (D + 1) / 2 with the quadratic formula.
    dim = round((0.25 + 2 * num_entries) ** 0.5 - 0.5)
    if dim * (dim + 1) // 2 != num_entries:
        raise ValueError("Input is not a flattend lower-diagonal number")
    side = dim - offset
    return shape[:-1] + (side, side)
def _matrix_inverse_shape(shape, offset=0):
    """Map a batch shape ``(..., D, D)`` of square matrices to the shape
    ``(..., N)`` of their flattened (lower-triangular) representation,
    with ``offset`` shifting D as in :func:`_matrix_forward_shape`."""
    if len(shape) < 2:
        raise ValueError("Too few dimensions on input")
    rows, cols = shape[-2], shape[-1]
    if rows != cols:
        raise ValueError("Input is not square")
    dim = cols + offset
    # Entries on and below the diagonal of a dim x dim matrix.
    return shape[:-2] + (dim * (dim + 1) // 2,)
class CholeskyTransform(Transform):
    r"""
    Transform via the mapping :math:`y = cholesky(x)`, where `x` is a
    positive definite matrix.
    """
    domain = constraints.positive_definite
    codomain = constraints.lower_cholesky
    def __call__(self, x):
        # Lower-triangular Cholesky factor of x.
        return jnp.linalg.cholesky(x)
    def _inverse(self, y):
        # Rebuild x = y @ y^T, transposing the trailing two axes batch-wise.
        y_transposed = jnp.swapaxes(y, -2, -1)
        return jnp.matmul(y, y_transposed)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13
        n = jnp.shape(x)[-1]
        diag = jnp.diagonal(y, axis1=-2, axis2=-1)
        powers = -jnp.arange(n, 0, -1)
        return jnp.sum(powers * jnp.log(diag), axis=-1) - n * jnp.log(2)
class CorrCholeskyTransform(Transform):
r"""
Transforms a uncontrained real vector :math:`x` with length :math:`D*(D-1)/2` into the
Cholesky factor of a D-dimension correlation matrix. This Cholesky factor is a lower
triangular matrix with positive diagonals and unit Euclidean norm for each row.
The transform is processed as follows:
1. First we convert :math:`x` into a lower triangular matrix with the following order:
.. math::
\begin{bmatrix}
1 & 0 & 0 & 0 \\
x_0 & 1 & 0 & 0 \\
x_1 & x_2 & 1 & 0 \\
x_3 & x_4 & x_5 & 1
\end{bmatrix}
2. For each row :math:`X_i` of the lower triangular part, we apply a *signed* version of
class :class:`StickBreakingTransform` to transform :math:`X_i` into a
unit Euclidean length vector using the following steps:
a. Scales into the interval :math:`(-1, 1)` domain: :math:`r_i = \tanh(X_i)`.
b. Transforms into an unsigned domain: :math:`z_i = r_i^2`.
c. Applies :math:`s_i = StickBreakingTransform(z_i)`.
d. Transforms back into signed domain: :math:`y_i = (sign(r_i), 1) * \sqrt{s_i}`.
"""
domain = constraints.real_vector
codomain = constraints.corr_cholesky
def __call__(self, x):
# we interchange step 1 and step 2.a for a better performance
t = jnp.tanh(x)
return signed_stick_breaking_tril(t)
def _inverse(self, y):
# inverse stick-breaking
z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)
pad_width = [(0, 0)] * y.ndim
pad_width[-1] = (1, 0)
z1m_cumprod_shifted = jnp.pad(z1m_cumprod[..., :-1], pad_width,
mode="constant", constant_values=1.)
t = matrix_to_tril_vec(y, diagonal=-1) / jnp.sqrt(
matrix_to_tril_vec(z1m_cumprod_shifted, diagonal=-1))
# inverse of tanh
x = jnp.log((1 + t) / (1 - t)) / 2
return x
def log_abs_det_jacobian(self, x, y, intermediates=None):
# NB: because domain and codomain are two spaces with different dimensions, determinant of
# Jacobian is not well-defined. Here we return `log_abs_det_jacobian` of `x` and the
# flatten lower triangular part of `y`.
# stick_breaking_logdet = log(y / r) = log(z_cumprod) (modulo right shifted)
z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)
# by taking diagonal=-2, we don't need to shift z_cumprod to the right
# NB: diagonal=-2 works fine for (2 x 2) matrix, where we get an empty array
z1m_cumprod_tril = matrix_to_tril_vec(z1m_cumprod, diagonal=-2)
stick_breaking_logdet = 0.5 * jnp.sum(jnp.log(z1m_cumprod_tril), axis=-1)
tanh_logdet = -2 * jnp.sum(x + softplus(-2 * x) - jnp.log(2.), axis=-1)
return stick_breaking_logdet + tanh_logdet
    def forward_shape(self, shape):
        # (..., N) vector -> (..., D, D) factor; offset=-1 because the unit
        # diagonal is not parameterized.
        return _matrix_forward_shape(shape, offset=-1)
    def inverse_shape(self, shape):
        # (..., D, D) factor -> (..., N) vector of strictly-lower entries.
        return _matrix_inverse_shape(shape, offset=-1)
class CorrMatrixCholeskyTransform(CholeskyTransform):
    r"""
    Transform via the mapping :math:`y = cholesky(x)`, where `x` is a
    correlation matrix.
    """
    domain = constraints.corr_matrix
    codomain = constraints.corr_cholesky

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # NB: see derivation in LKJCholesky implementation
        dim = jnp.shape(x)[-1]
        # powers == -arange(dim - 1, -1, -1), i.e. (-(dim-1), ..., -1, 0)
        powers = jnp.arange(-(dim - 1), 1)
        log_diag = jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1))
        return jnp.sum(powers * log_diag, axis=-1)
class ExpTransform(Transform):
    """Elementwise exponential transform :math:`y = \\exp(x)`."""
    # TODO: refine domain/codomain logic through setters, especially when
    # transforms for inverses are supported
    def __init__(self, domain=constraints.real):
        self.domain = domain

    @property
    def codomain(self):
        # Image of the domain under exp (exp is monotone increasing).
        domain = self.domain
        if domain is constraints.real:
            return constraints.positive
        if isinstance(domain, constraints.greater_than):
            return constraints.greater_than(self(domain.lower_bound))
        if isinstance(domain, constraints.interval):
            lower, upper = self(domain.lower_bound), self(domain.upper_bound)
            return constraints.interval(lower, upper)
        raise NotImplementedError

    def __call__(self, x):
        # XXX consider to clamp from below for stability if necessary
        return jnp.exp(x)

    def _inverse(self, y):
        return jnp.log(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # d/dx exp(x) = exp(x), so log|J| = x elementwise.
        return x
class IdentityTransform(Transform):
    """No-op transform: ``y = x``."""

    def __call__(self, x):
        return x

    def _inverse(self, y):
        return y

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # The Jacobian is the identity; its log-determinant is 0 everywhere.
        return jnp.zeros_like(x)
class IndependentTransform(Transform):
    """
    Wraps a transform by aggregating over ``reinterpreted_batch_ndims``-many
    dims in :meth:`check`, so that an event is valid only if all its
    independent entries are valid.

    :param base_transform: the :class:`Transform` to wrap.
    :param reinterpreted_batch_ndims: number of rightmost batch dimensions to
        fold into the event.
    """
    def __init__(self, base_transform, reinterpreted_batch_ndims):
        assert isinstance(base_transform, Transform)
        assert isinstance(reinterpreted_batch_ndims, int)
        assert reinterpreted_batch_ndims >= 0
        self.base_transform = base_transform
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        super().__init__()

    @property
    def domain(self):
        return constraints.independent(self.base_transform.domain, self.reinterpreted_batch_ndims)

    @property
    def codomain(self):
        return constraints.independent(self.base_transform.codomain, self.reinterpreted_batch_ndims)

    def __call__(self, x):
        return self.base_transform(x)

    def _inverse(self, y):
        return self.base_transform._inverse(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        result = self.base_transform.log_abs_det_jacobian(x, y, intermediates=intermediates)
        if jnp.ndim(result) < self.reinterpreted_batch_ndims:
            expected = self.domain.event_dim
            raise ValueError(f"Expected x.dim() >= {expected} but got {jnp.ndim(x)}")
        # sum the per-entry log-dets over the reinterpreted batch dims
        return sum_rightmost(result, self.reinterpreted_batch_ndims)

    def call_with_intermediates(self, x):
        return self.base_transform.call_with_intermediates(x)

    def forward_shape(self, shape):
        return self.base_transform.forward_shape(shape)

    def inverse_shape(self, shape):
        return self.base_transform.inverse_shape(shape)
class InvCholeskyTransform(Transform):
    r"""
    Transform via the mapping :math:`y = x @ x.T`, where `x` is a lower
    triangular matrix with positive diagonal.

    .. deprecated:: emits a :class:`FutureWarning` on construction; use
        :class:`CholeskyTransform` or :class:`CorrMatrixCholeskyTransform`.
    """
    def __init__(self, domain=constraints.lower_cholesky):
        warnings.warn("InvCholeskyTransform is deprecated. Please use CholeskyTransform"
                      " or CorrMatrixCholeskyTransform instead.", FutureWarning)
        assert domain in [constraints.lower_cholesky, constraints.corr_cholesky]
        self.domain = domain

    @property
    def codomain(self):
        # A generic Cholesky factor yields a positive definite matrix; a
        # correlation-Cholesky factor yields a correlation matrix.
        if self.domain is constraints.lower_cholesky:
            return constraints.positive_definite
        elif self.domain is constraints.corr_cholesky:
            return constraints.corr_matrix

    def __call__(self, x):
        return jnp.matmul(x, jnp.swapaxes(x, -2, -1))

    def _inverse(self, y):
        return jnp.linalg.cholesky(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        if self.domain is constraints.lower_cholesky:
            # Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13
            n = jnp.shape(x)[-1]
            order = jnp.arange(n, 0, -1)
            return n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)
        else:
            # NB: see derivation in LKJCholesky implementation
            n = jnp.shape(x)[-1]
            order = jnp.arange(n - 1, -1, -1)
            return jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)
class LowerCholeskyAffine(Transform):
    r"""
    Transform via the mapping :math:`y = loc + scale\_tril\ @\ x`.

    :param loc: a real vector.
    :param scale_tril: a lower triangular matrix with positive diagonal.
    """
    domain = constraints.real_vector
    codomain = constraints.real_vector

    def __init__(self, loc, scale_tril):
        if jnp.ndim(scale_tril) != 2:
            raise ValueError("Only support 2-dimensional scale_tril matrix. "
                             "Please make a feature request if you need to "
                             "use this transform with batched scale_tril.")
        self.loc = loc
        self.scale_tril = scale_tril

    def __call__(self, x):
        # matmul against the trailing dim of x, treating leading dims as batch
        return self.loc + jnp.squeeze(jnp.matmul(self.scale_tril, x[..., jnp.newaxis]), axis=-1)

    def _inverse(self, y):
        # flatten the batch so a single 2D triangular solve handles all rows
        y = y - self.loc
        original_shape = jnp.shape(y)
        yt = jnp.reshape(y, (-1, original_shape[-1])).T
        xt = solve_triangular(self.scale_tril, yt, lower=True)
        return jnp.reshape(xt.T, original_shape)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # det of a triangular matrix is the product of its diagonal entries
        return jnp.broadcast_to(jnp.log(jnp.diagonal(self.scale_tril, axis1=-2, axis2=-1)).sum(-1),
                                jnp.shape(x)[:-1])

    def forward_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])

    def inverse_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])
class LowerCholeskyTransform(Transform):
    """Map an unconstrained real vector to a lower-triangular matrix whose
    diagonal is made positive through ``exp``."""
    domain = constraints.real_vector
    codomain = constraints.lower_cholesky

    @staticmethod
    def _side(x):
        # N = n(n+1)/2 packed entries encode an n x n factor; solve for n.
        return round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)

    def __call__(self, x):
        n = self._side(x)
        strict_lower = vec_to_tril_matrix(x[..., :-n], diagonal=-1)
        diag = jnp.exp(x[..., -n:])
        return strict_lower + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)

    def _inverse(self, y):
        strict_lower = matrix_to_tril_vec(y, diagonal=-1)
        log_diag = jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1))
        return jnp.concatenate([strict_lower, log_diag], axis=-1)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Only the last n inputs are transformed (by exp); the jacobian is
        # diagonal, so the logdet is the sum of those inputs themselves.
        n = self._side(x)
        return x[..., -n:].sum(-1)

    def forward_shape(self, shape):
        return _matrix_forward_shape(shape)

    def inverse_shape(self, shape):
        return _matrix_inverse_shape(shape)
class OrderedTransform(Transform):
    """
    Transform a real vector to an ordered vector.

    **References:**

    1. *Stan Reference Manual v2.20, section 10.6*,
       Stan Development Team
    """
    domain = constraints.real_vector
    codomain = constraints.ordered_vector

    def __call__(self, x):
        # First coordinate is free; every later gap is made positive via exp,
        # so the cumulative sums are strictly increasing.
        gaps = jnp.exp(x[..., 1:])
        return jnp.cumsum(jnp.concatenate([x[..., :1], gaps], axis=-1), axis=-1)

    def _inverse(self, y):
        log_gaps = jnp.log(y[..., 1:] - y[..., :-1])
        return jnp.concatenate([y[..., :1], log_gaps], axis=-1)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Triangular jacobian with diagonal (1, exp(x_1), ..., exp(x_{n-1})).
        return jnp.sum(x[..., 1:], -1)
class PermuteTransform(Transform):
    """Reorder the last dimension according to a fixed index array.

    :param permutation: 1D integer array; output slot ``i`` takes input
        entry ``permutation[i]``.
    """
    domain = constraints.real_vector
    codomain = constraints.real_vector

    def __init__(self, permutation):
        self.permutation = permutation

    def __call__(self, x):
        return x[..., self.permutation]

    def _inverse(self, y):
        # argsort of a permutation is its inverse permutation; this replaces
        # the scatter-style `ops.index_update` construction, which is
        # deprecated in recent JAX releases, with a plain gather.
        return y[..., jnp.argsort(self.permutation)]

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # A permutation matrix has |det| == 1, hence log|det| == 0.
        return jnp.full(jnp.shape(x)[:-1], 0.)
class PowerTransform(Transform):
    """Elementwise power transform :math:`y = x^p` on the positive reals."""
    domain = constraints.positive
    codomain = constraints.positive

    def __init__(self, exponent):
        self.exponent = exponent

    def __call__(self, x):
        return jnp.power(x, self.exponent)

    def _inverse(self, y):
        return jnp.power(y, 1 / self.exponent)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # dy/dx = p * x**(p-1) = p * y / x (x > 0 on this domain)
        return jnp.log(jnp.abs(self.exponent * y / x))

    def forward_shape(self, shape):
        exponent_shape = getattr(self.exponent, "shape", ())
        return lax.broadcast_shapes(shape, exponent_shape)

    def inverse_shape(self, shape):
        exponent_shape = getattr(self.exponent, "shape", ())
        return lax.broadcast_shapes(shape, exponent_shape)
class SigmoidTransform(Transform):
    """Map the real line into the open unit interval via the logistic sigmoid."""
    codomain = constraints.unit_interval

    def __call__(self, x):
        return _clipped_expit(x)

    def _inverse(self, y):
        return logit(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log sigmoid'(x) written via |x| so both tails stay numerically stable
        abs_x = jnp.abs(x)
        return -abs_x - 2 * jnp.log1p(jnp.exp(-abs_x))
def _softplus_inv(y):
    # Inverse of softplus: x = log(exp(y) - 1), computed as y + log(1 - exp(-y))
    # via expm1 for numerical stability at small y.
    return y + jnp.log(-jnp.expm1(-y))
class SoftplusTransform(Transform):
    r"""
    Transform from unconstrained space to positive domain via softplus :math:`y = \log(1 + \exp(x))`.
    The inverse is computed as :math:`x = \log(\exp(y) - 1)`.
    """
    domain = constraints.real
    codomain = constraints.softplus_positive

    def __call__(self, x):
        return softplus(x)

    def _inverse(self, y):
        return _softplus_inv(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # softplus'(x) = sigmoid(x), and log sigmoid(x) = -softplus(-x)
        return -softplus(-x)
class SoftplusLowerCholeskyTransform(Transform):
    """
    Transform from unconstrained vector to lower-triangular matrices with
    nonnegative diagonal entries. This is useful for parameterizing positive
    definite matrices in terms of their Cholesky factorization.
    """
    domain = constraints.real_vector
    codomain = constraints.softplus_lower_cholesky

    def __call__(self, x):
        # recover the matrix size n from the N = n(n+1)/2 packed entries
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)
        diag = softplus(x[..., -n:])
        return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)

    def _inverse(self, y):
        z = matrix_to_tril_vec(y, diagonal=-1)
        diag = _softplus_inv(jnp.diagonal(y, axis1=-2, axis2=-1))
        return jnp.concatenate([z, diag], axis=-1)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # the jacobian is diagonal, so logdet is the sum over the diagonal
        # `softplus` derivatives (strictly-lower entries pass through as-is)
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        return -softplus(-x[..., -n:]).sum(-1)

    def forward_shape(self, shape):
        return _matrix_forward_shape(shape)

    def inverse_shape(self, shape):
        return _matrix_inverse_shape(shape)
class StickBreakingTransform(Transform):
    """Bijection from :math:`R^{K-1}` to the K-dimensional simplex via
    stick-breaking."""
    domain = constraints.real_vector
    codomain = constraints.simplex

    def __call__(self, x):
        # we shift x to obtain a balanced mapping (0, 0, ..., 0) -> (1/K, 1/K, ..., 1/K)
        x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))
        # convert to probabilities (relative to the remaining) of each fraction of the stick
        z = _clipped_expit(x)
        z1m_cumprod = jnp.cumprod(1 - z, axis=-1)
        pad_width = [(0, 0)] * x.ndim
        pad_width[-1] = (0, 1)
        z_padded = jnp.pad(z, pad_width, mode="constant", constant_values=1.)
        pad_width = [(0, 0)] * x.ndim
        pad_width[-1] = (1, 0)
        z1m_cumprod_shifted = jnp.pad(z1m_cumprod, pad_width, mode="constant", constant_values=1.)
        return z_padded * z1m_cumprod_shifted

    def _inverse(self, y):
        y_crop = y[..., :-1]
        # clip away from 0 so the log below stays finite
        z1m_cumprod = jnp.clip(1 - jnp.cumsum(y_crop, axis=-1), a_min=jnp.finfo(y.dtype).tiny)
        # hence x = logit(z) = log(z / (1 - z)) = y[::-1] / z1m_cumprod
        x = jnp.log(y_crop / z1m_cumprod)
        # undo the balancing shift applied in __call__
        return x + jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Ref: https://mc-stan.org/docs/2_19/reference-manual/simplex-transform-section.html
        # |det|(J) = Product(y * (1 - z))
        x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))
        z = jnp.clip(expit(x), a_min=jnp.finfo(x.dtype).tiny)
        # XXX we use the identity 1 - z = z * exp(-x) to not worry about
        # the case z ~ 1
        return jnp.sum(jnp.log(y[..., :-1] * z) - x, axis=-1)

    def forward_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return shape[:-1] + (shape[-1] + 1,)

    def inverse_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return shape[:-1] + (shape[-1] - 1,)
class UnpackTransform(Transform):
    """
    Transforms a contiguous array to a pytree of subarrays.

    :param unpack_fn: callable used to unpack a contiguous array.
    """
    domain = constraints.real_vector
    codomain = constraints.dependent

    def __init__(self, unpack_fn):
        self.unpack_fn = unpack_fn

    def __call__(self, x):
        batch_shape = x.shape[:-1]
        if batch_shape:
            # vmap over a flattened batch, then restore the batch shape on each leaf
            unpacked = vmap(self.unpack_fn)(x.reshape((-1,) + x.shape[-1:]))
            return tree_map(lambda z: jnp.reshape(z, batch_shape + z.shape[1:]), unpacked)
        else:
            return self.unpack_fn(x)

    def _inverse(self, y):
        leading_dims = [v.shape[0] if jnp.ndim(v) > 0 else 0
                        for v in tree_flatten(y)[0]]
        d0 = leading_dims[0]
        not_scalar = d0 > 0 or len(leading_dims) > 1
        # a shared positive leading dim suggests `y` is a batch of unpacked
        # arrays, which ravel_pytree cannot undo faithfully
        if not_scalar and all(d == d0 for d in leading_dims[1:]):
            warnings.warn("UnpackTransform.inv might lead to an unexpected behavior because it"
                          " cannot transform a batch of unpacked arrays.")
        return ravel_pytree(y)[0]

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # unpacking only rearranges elements, so the map is volume preserving
        return jnp.zeros(jnp.shape(x)[:-1])

    def forward_shape(self, shape):
        raise NotImplementedError

    def inverse_shape(self, shape):
        raise NotImplementedError
##########################################################
# CONSTRAINT_REGISTRY
##########################################################
class ConstraintRegistry(object):
    """Registry mapping constraint types to factories that build a transform
    for a given constraint instance."""

    def __init__(self):
        self._registry = {}

    def register(self, constraint, factory=None):
        """Register ``factory`` for ``constraint``; usable as a decorator.

        Returns the factory so that decorator usage keeps the decorated
        function bound to its module-level name (previously the decorator
        returned ``None``, silently rebinding the name to ``None``).
        """
        if factory is None:
            return lambda factory: self.register(constraint, factory)
        # Singleton constraints (e.g. constraints.positive) register their type.
        if isinstance(constraint, constraints.Constraint):
            constraint = type(constraint)
        self._registry[constraint] = factory
        return factory

    def __call__(self, constraint):
        try:
            factory = self._registry[type(constraint)]
        except KeyError as e:
            raise NotImplementedError from e
        return factory(constraint)
# Global registry used to look up a bijective transform for a given constraint.
biject_to = ConstraintRegistry()
# corr_cholesky: unconstrained vector -> Cholesky factor of a correlation matrix
@biject_to.register(constraints.corr_cholesky)
def _transform_to_corr_cholesky(constraint):
    return CorrCholeskyTransform()
# corr_matrix: go through the Cholesky factor, then undo the factorization
@biject_to.register(constraints.corr_matrix)
def _transform_to_corr_matrix(constraint):
    return ComposeTransform([CorrCholeskyTransform(),
                             CorrMatrixCholeskyTransform().inv])
# greater_than: exp then shift; the plain positive constraint needs no shift
@biject_to.register(constraints.greater_than)
def _transform_to_greater_than(constraint):
    if constraint is constraints.positive:
        return ExpTransform()
    return ComposeTransform([ExpTransform(),
                             AffineTransform(constraint.lower_bound, 1,
                                             domain=constraints.positive)])
# less_than: exp then reflect below the upper bound (scale = -1)
@biject_to.register(constraints.less_than)
def _transform_to_less_than(constraint):
    return ComposeTransform([ExpTransform(),
                             AffineTransform(constraint.upper_bound, -1,
                                             domain=constraints.positive)])
# independent: delegate to the base constraint, re-wrapping the batch dims
@biject_to.register(constraints.independent)
def _biject_to_independent(constraint):
    return IndependentTransform(biject_to(constraint.base_constraint),
                                constraint.reinterpreted_batch_ndims)
# interval: sigmoid into (0, 1), then rescale; unit_interval needs no affine
@biject_to.register(constraints.interval)
def _transform_to_interval(constraint):
    if constraint is constraints.unit_interval:
        return SigmoidTransform()
    scale = constraint.upper_bound - constraint.lower_bound
    return ComposeTransform([SigmoidTransform(),
                             AffineTransform(constraint.lower_bound, scale,
                                             domain=constraints.unit_interval)])
# lower_cholesky: packed vector -> lower-triangular factor with exp'd diagonal
@biject_to.register(constraints.lower_cholesky)
def _transform_to_lower_cholesky(constraint):
    return LowerCholeskyTransform()
# ordered_vector: cumulative sums of exp'd gaps
@biject_to.register(constraints.ordered_vector)
def _transform_to_ordered_vector(constraint):
    return OrderedTransform()
# positive_definite: build a Cholesky factor, then multiply it out
@biject_to.register(constraints.positive_definite)
def _transform_to_positive_definite(constraint):
    return ComposeTransform([LowerCholeskyTransform(), CholeskyTransform().inv])
# positive_ordered_vector: order first, then exponentiate to force positivity
@biject_to.register(constraints.positive_ordered_vector)
def _transform_to_positive_ordered_vector(constraint):
    return ComposeTransform([OrderedTransform(), ExpTransform()])
# real: already unconstrained
@biject_to.register(constraints.real)
def _transform_to_real(constraint):
    return IdentityTransform()
# softplus_positive: positivity via softplus instead of exp
@biject_to.register(constraints.softplus_positive)
def _transform_to_softplus_positive(constraint):
    return SoftplusTransform()
# softplus_lower_cholesky: triangular factor with softplus'd diagonal
@biject_to.register(constraints.softplus_lower_cholesky)
def _transform_to_softplus_lower_cholesky(constraint):
    return SoftplusLowerCholeskyTransform()
# simplex: stick-breaking construction
@biject_to.register(constraints.simplex)
def _transform_to_simplex(constraint):
    return StickBreakingTransform()
| 34.851852 | 120 | 0.637559 |
import math
import warnings
import weakref
import numpy as np
from jax import lax, ops, tree_flatten, tree_map, vmap
from jax.flatten_util import ravel_pytree
from jax.nn import softplus
import jax.numpy as jnp
from jax.scipy.linalg import solve_triangular
from jax.scipy.special import expit, logit
from numpyro.distributions import constraints
from numpyro.distributions.util import matrix_to_tril_vec, signed_stick_breaking_tril, sum_rightmost, vec_to_tril_matrix
from numpyro.util import not_jax_tracer
# Public API. Fixes: 'SoftplusTransform' was listed twice, and the public
# 'IndependentTransform' / 'OrderedTransform' (both defined and registered in
# `biject_to` below) were missing. Kept 'biject_to' first, rest alphabetical.
__all__ = [
    'biject_to',
    'AbsTransform',
    'AffineTransform',
    'CholeskyTransform',
    'ComposeTransform',
    'CorrCholeskyTransform',
    'CorrMatrixCholeskyTransform',
    'ExpTransform',
    'IdentityTransform',
    'IndependentTransform',
    'InvCholeskyTransform',
    'LowerCholeskyAffine',
    'LowerCholeskyTransform',
    'OrderedTransform',
    'PermuteTransform',
    'PowerTransform',
    'SigmoidTransform',
    'SoftplusLowerCholeskyTransform',
    'SoftplusTransform',
    'StickBreakingTransform',
    'Transform',
    'UnpackTransform',
]
def _clipped_expit(x):
    # Sigmoid clipped strictly inside (0, 1) so downstream logs/logits stay finite.
    finfo = jnp.finfo(jnp.result_type(x))
    lo, hi = finfo.tiny, 1. - finfo.eps
    return jnp.clip(expit(x), a_min=lo, a_max=hi)
class Transform(object):
    """Abstract base class for invertible transforms with computable
    log-abs-det-Jacobians.

    Subclasses implement ``__call__``, ``_inverse`` and
    ``log_abs_det_jacobian``; ``domain``/``codomain`` describe the supports.
    """
    domain = constraints.real
    codomain = constraints.real
    _inv = None

    @property
    def event_dim(self):
        warnings.warn("transform.event_dim is deprecated. Please use Transform.domain.event_dim to "
                      "get input event dim or Transform.codomain.event_dim to get output event dim.",
                      FutureWarning)
        return self.domain.event_dim

    @property
    def inv(self):
        inv = None
        if self._inv is not None:
            inv = self._inv()
        if inv is None:
            inv = _InverseTransform(self)
            # cache only a weak reference so the forward/inverse pair does not
            # keep each other alive in a reference cycle
            self._inv = weakref.ref(inv)
        return inv

    def __call__(self, x):
        # Fix: the original `return NotImplementedError` handed the exception
        # *class* back to the caller instead of signalling an abstract method.
        raise NotImplementedError

    def _inverse(self, y):
        raise NotImplementedError

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        raise NotImplementedError

    def call_with_intermediates(self, x):
        # default: no intermediate values are cached for log_abs_det_jacobian
        return self(x), None

    def forward_shape(self, shape):
        """Infer the output event shape from the input event shape."""
        return shape

    def inverse_shape(self, shape):
        """Infer the input event shape from the output event shape."""
        return shape
class _InverseTransform(Transform):
    """Lazy inverse of ``transform``; produced by the ``Transform.inv`` property."""

    def __init__(self, transform):
        super().__init__()
        # NB: unlike `Transform._inv` (a weakref to a cached inverse), here
        # `_inv` holds the forward transform itself; the `inv` property below
        # returns it so inversion round-trips.
        self._inv = transform

    @property
    def domain(self):
        return self._inv.codomain

    @property
    def codomain(self):
        return self._inv.domain

    @property
    def inv(self):
        return self._inv

    def __call__(self, x):
        return self._inv._inverse(x)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log|d(f^-1)/dx| = -log|df/dy| evaluated with the arguments swapped
        return -self._inv.log_abs_det_jacobian(y, x, None)

    def forward_shape(self, shape):
        return self._inv.inverse_shape(shape)

    def inverse_shape(self, shape):
        return self._inv.forward_shape(shape)
class AbsTransform(Transform):
    """Maps ``x -> |x|``. Not injective: ``_inverse`` returns its input
    unchanged, i.e. it acts as a right inverse picking the nonnegative branch."""
    # NOTE(review): defining __eq__ without __hash__ makes instances unhashable
    # (Python sets __hash__ to None) — confirm no caller relies on hashing.
    domain = constraints.real
    codomain = constraints.positive

    def __eq__(self, other):
        return isinstance(other, AbsTransform)

    def __call__(self, x):
        return jnp.abs(x)

    def _inverse(self, y):
        # y lies in the codomain (nonnegative), where abs is the identity
        return y
class AffineTransform(Transform):
    r"""
    Transform via the mapping :math:`y = loc + scale \cdot x`.

    :param loc: additive offset.
    :param scale: multiplicative factor; assumed nonzero (and assumed positive
        for traced values — see the NB comments below).
    :param domain: constraint on the inputs (default: the whole real line).
    """
    def __init__(self, loc, scale, domain=constraints.real):
        self.loc = loc
        self.scale = scale
        self.domain = domain

    @property
    def codomain(self):
        # Image of `domain` under x -> loc + scale * x; a (concretely known)
        # negative scale flips lower/upper bounds.
        if self.domain is constraints.real:
            return constraints.real
        elif isinstance(self.domain, constraints.greater_than):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.less_than(self(self.domain.lower_bound))
            # we suppose scale > 0 for any tracer
            else:
                return constraints.greater_than(self(self.domain.lower_bound))
        elif isinstance(self.domain, constraints.less_than):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.greater_than(self(self.domain.upper_bound))
            # we suppose scale > 0 for any tracer
            else:
                return constraints.less_than(self(self.domain.upper_bound))
        elif isinstance(self.domain, constraints.interval):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.interval(self(self.domain.upper_bound),
                                            self(self.domain.lower_bound))
            else:
                return constraints.interval(self(self.domain.lower_bound),
                                            self(self.domain.upper_bound))
        else:
            raise NotImplementedError

    def __call__(self, x):
        return self.loc + self.scale * x

    def _inverse(self, y):
        return (y - self.loc) / self.scale

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # elementwise map, so log|J| = log|scale| broadcast to x's shape
        return jnp.broadcast_to(jnp.log(jnp.abs(self.scale)), jnp.shape(x))

    def forward_shape(self, shape):
        return lax.broadcast_shapes(shape,
                                    getattr(self.loc, "shape", ()),
                                    getattr(self.scale, "shape", ()))

    def inverse_shape(self, shape):
        return lax.broadcast_shapes(shape,
                                    getattr(self.loc, "shape", ()),
                                    getattr(self.scale, "shape", ()))
def _get_compose_transform_input_event_dim(parts):
    """Required input event dim for applying ``parts`` in order.

    Walks backwards: each earlier part must supply at least its own domain
    event dim, plus any batch dims that downstream parts consume beyond its
    codomain event dim.
    """
    input_event_dim = parts[-1].domain.event_dim
    # Fix: the original slice `parts[len(parts) - 1::-1]` iterated over ALL
    # parts reversed, re-processing the last part that already seeded the
    # recursion. That over-counts whenever the trailing transform has
    # domain.event_dim > codomain.event_dim. Mirror the companion output
    # function, which skips its seeding part via `parts[1:]`.
    for part in reversed(parts[:-1]):
        input_event_dim = part.domain.event_dim + max(input_event_dim - part.codomain.event_dim, 0)
    return input_event_dim
def _get_compose_transform_output_event_dim(parts):
    """Output event dim produced by applying ``parts`` in order."""
    event_dim = parts[0].codomain.event_dim
    for transform in parts[1:]:
        # batch dims already accumulated beyond what this transform consumes
        extra_batch = max(event_dim - transform.domain.event_dim, 0)
        event_dim = transform.codomain.event_dim + extra_batch
    return event_dim
class ComposeTransform(Transform):
    """Composition of a sequence of transforms, applied left to right.

    :param parts: list of :class:`Transform` instances.
    """
    def __init__(self, parts):
        self.parts = parts

    @property
    def domain(self):
        input_event_dim = _get_compose_transform_input_event_dim(self.parts)
        first_input_event_dim = self.parts[0].domain.event_dim
        assert input_event_dim >= first_input_event_dim
        if input_event_dim == first_input_event_dim:
            return self.parts[0].domain
        else:
            # downstream parts require extra batch dims on top of the first
            # part's own event dims
            return constraints.independent(self.parts[0].domain, input_event_dim - first_input_event_dim)

    @property
    def codomain(self):
        output_event_dim = _get_compose_transform_output_event_dim(self.parts)
        last_output_event_dim = self.parts[-1].codomain.event_dim
        assert output_event_dim >= last_output_event_dim
        if output_event_dim == last_output_event_dim:
            return self.parts[-1].codomain
        else:
            return constraints.independent(self.parts[-1].codomain, output_event_dim - last_output_event_dim)

    def __call__(self, x):
        for part in self.parts:
            x = part(x)
        return x

    def _inverse(self, y):
        # apply the inverses in reverse order
        for part in self.parts[::-1]:
            y = part.inv(y)
        return y

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        """Sum of the parts' log-dets, each summed over the batch dims that
        this composition reinterprets as event dims."""
        if intermediates is not None:
            if len(intermediates) != len(self.parts):
                raise ValueError('Intermediates array has length = {}. Expected = {}.'
                                 .format(len(intermediates), len(self.parts)))

        result = 0.
        input_event_dim = self.domain.event_dim
        for i, part in enumerate(self.parts[:-1]):
            # reuse cached forward values when available to avoid recomputation
            y_tmp = part(x) if intermediates is None else intermediates[i][0]
            inter = None if intermediates is None else intermediates[i][1]
            logdet = part.log_abs_det_jacobian(x, y_tmp, intermediates=inter)
            batch_ndim = input_event_dim - part.domain.event_dim
            result = result + sum_rightmost(logdet, batch_ndim)
            input_event_dim = part.codomain.event_dim + batch_ndim
            x = y_tmp
        # account for the last transform, where y is available
        inter = None if intermediates is None else intermediates[-1]
        part = self.parts[-1]
        logdet = part.log_abs_det_jacobian(x, y, intermediates=inter)
        result = result + sum_rightmost(logdet, input_event_dim - part.domain.event_dim)
        return result

    def call_with_intermediates(self, x):
        intermediates = []
        for part in self.parts[:-1]:
            x, inter = part.call_with_intermediates(x)
            intermediates.append([x, inter])
        # NB: we don't need to hold the last output value in `intermediates`
        x, inter = self.parts[-1].call_with_intermediates(x)
        intermediates.append(inter)
        return x, intermediates

    def forward_shape(self, shape):
        for part in self.parts:
            shape = part.forward_shape(shape)
        return shape

    def inverse_shape(self, shape):
        for part in reversed(self.parts):
            shape = part.inverse_shape(shape)
        return shape
def _matrix_forward_shape(shape, offset=0):
    """Map a (..., N) packed-triangular vector shape to a (..., D, D) matrix shape.

    ``N`` must be a triangular number D(D+1)/2; ``offset`` adjusts the matrix
    side (e.g. offset=-1 when the diagonal is not parameterized).
    """
    if len(shape) < 1:
        # message unified with the other shape helpers ("on input")
        raise ValueError("Too few dimensions on input")
    N = shape[-1]
    D = round((0.25 + 2 * N) ** 0.5 - 0.5)
    if D * (D + 1) // 2 != N:
        # fix typo in the original message: "flattend" -> "flattened"
        raise ValueError("Input is not a flattened lower-diagonal number")
    D = D - offset
    return shape[:-1] + (D, D)
def _matrix_inverse_shape(shape, offset=0):
    """Map a (..., D, D) matrix shape to the (..., N) packed-triangular vector
    shape, where N = (D + offset)(D + offset + 1) / 2."""
    if len(shape) < 2:
        raise ValueError("Too few dimensions on input")
    if shape[-2] != shape[-1]:
        raise ValueError("Input is not square")
    side = shape[-1] + offset
    return shape[:-2] + (side * (side + 1) // 2,)
class CholeskyTransform(Transform):
    r"""
    Transform via the mapping :math:`y = cholesky(x)`, where `x` is a
    positive definite matrix.
    """
    domain = constraints.positive_definite
    codomain = constraints.lower_cholesky

    def __call__(self, x):
        return jnp.linalg.cholesky(x)

    def _inverse(self, y):
        return jnp.matmul(y, jnp.swapaxes(y, -2, -1))

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # negative of the logdet of the inverse map y -> y @ y.T
        # (cf. InvCholeskyTransform.log_abs_det_jacobian for the forward form)
        n = jnp.shape(x)[-1]
        order = -jnp.arange(n, 0, -1)
        return -n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)
class CorrCholeskyTransform(Transform):
domain = constraints.real_vector
codomain = constraints.corr_cholesky
def __call__(self, x):
t = jnp.tanh(x)
return signed_stick_breaking_tril(t)
def _inverse(self, y):
z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)
pad_width = [(0, 0)] * y.ndim
pad_width[-1] = (1, 0)
z1m_cumprod_shifted = jnp.pad(z1m_cumprod[..., :-1], pad_width,
mode="constant", constant_values=1.)
t = matrix_to_tril_vec(y, diagonal=-1) / jnp.sqrt(
matrix_to_tril_vec(z1m_cumprod_shifted, diagonal=-1))
x = jnp.log((1 + t) / (1 - t)) / 2
return x
def log_abs_det_jacobian(self, x, y, intermediates=None):
z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)
# NB: diagonal=-2 works fine for (2 x 2) matrix, where we get an empty array
z1m_cumprod_tril = matrix_to_tril_vec(z1m_cumprod, diagonal=-2)
stick_breaking_logdet = 0.5 * jnp.sum(jnp.log(z1m_cumprod_tril), axis=-1)
tanh_logdet = -2 * jnp.sum(x + softplus(-2 * x) - jnp.log(2.), axis=-1)
return stick_breaking_logdet + tanh_logdet
def forward_shape(self, shape):
return _matrix_forward_shape(shape, offset=-1)
def inverse_shape(self, shape):
return _matrix_inverse_shape(shape, offset=-1)
class CorrMatrixCholeskyTransform(CholeskyTransform):
domain = constraints.corr_matrix
codomain = constraints.corr_cholesky
def log_abs_det_jacobian(self, x, y, intermediates=None):
# NB: see derivation in LKJCholesky implementation
n = jnp.shape(x)[-1]
order = -jnp.arange(n - 1, -1, -1)
return jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)
class ExpTransform(Transform):
# TODO: refine domain/codomain logic through setters, especially when
# transforms for inverses are supported
def __init__(self, domain=constraints.real):
self.domain = domain
@property
def codomain(self):
if self.domain is constraints.real:
return constraints.positive
elif isinstance(self.domain, constraints.greater_than):
return constraints.greater_than(self.__call__(self.domain.lower_bound))
elif isinstance(self.domain, constraints.interval):
return constraints.interval(self.__call__(self.domain.lower_bound),
self.__call__(self.domain.upper_bound))
else:
raise NotImplementedError
def __call__(self, x):
# XXX consider to clamp from below for stability if necessary
return jnp.exp(x)
def _inverse(self, y):
return jnp.log(y)
def log_abs_det_jacobian(self, x, y, intermediates=None):
return x
class IdentityTransform(Transform):
def __call__(self, x):
return x
def _inverse(self, y):
return y
def log_abs_det_jacobian(self, x, y, intermediates=None):
return jnp.zeros_like(x)
class IndependentTransform(Transform):
def __init__(self, base_transform, reinterpreted_batch_ndims):
assert isinstance(base_transform, Transform)
assert isinstance(reinterpreted_batch_ndims, int)
assert reinterpreted_batch_ndims >= 0
self.base_transform = base_transform
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
super().__init__()
@property
def domain(self):
return constraints.independent(self.base_transform.domain, self.reinterpreted_batch_ndims)
@property
def codomain(self):
return constraints.independent(self.base_transform.codomain, self.reinterpreted_batch_ndims)
def __call__(self, x):
return self.base_transform(x)
def _inverse(self, y):
return self.base_transform._inverse(y)
def log_abs_det_jacobian(self, x, y, intermediates=None):
result = self.base_transform.log_abs_det_jacobian(x, y, intermediates=intermediates)
if jnp.ndim(result) < self.reinterpreted_batch_ndims:
expected = self.domain.event_dim
raise ValueError(f"Expected x.dim() >= {expected} but got {jnp.ndim(x)}")
return sum_rightmost(result, self.reinterpreted_batch_ndims)
def call_with_intermediates(self, x):
return self.base_transform.call_with_intermediates(x)
def forward_shape(self, shape):
return self.base_transform.forward_shape(shape)
def inverse_shape(self, shape):
return self.base_transform.inverse_shape(shape)
class InvCholeskyTransform(Transform):
def __init__(self, domain=constraints.lower_cholesky):
warnings.warn("InvCholeskyTransform is deprecated. Please use CholeskyTransform"
" or CorrMatrixCholeskyTransform instead.", FutureWarning)
assert domain in [constraints.lower_cholesky, constraints.corr_cholesky]
self.domain = domain
@property
def codomain(self):
if self.domain is constraints.lower_cholesky:
return constraints.positive_definite
elif self.domain is constraints.corr_cholesky:
return constraints.corr_matrix
def __call__(self, x):
return jnp.matmul(x, jnp.swapaxes(x, -2, -1))
def _inverse(self, y):
return jnp.linalg.cholesky(y)
def log_abs_det_jacobian(self, x, y, intermediates=None):
if self.domain is constraints.lower_cholesky:
# Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13
n = jnp.shape(x)[-1]
order = jnp.arange(n, 0, -1)
return n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)
else:
# NB: see derivation in LKJCholesky implementation
n = jnp.shape(x)[-1]
order = jnp.arange(n - 1, -1, -1)
return jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)
class LowerCholeskyAffine(Transform):
domain = constraints.real_vector
codomain = constraints.real_vector
def __init__(self, loc, scale_tril):
if jnp.ndim(scale_tril) != 2:
raise ValueError("Only support 2-dimensional scale_tril matrix. "
"Please make a feature request if you need to "
"use this transform with batched scale_tril.")
self.loc = loc
self.scale_tril = scale_tril
def __call__(self, x):
return self.loc + jnp.squeeze(jnp.matmul(self.scale_tril, x[..., jnp.newaxis]), axis=-1)
def _inverse(self, y):
y = y - self.loc
original_shape = jnp.shape(y)
yt = jnp.reshape(y, (-1, original_shape[-1])).T
xt = solve_triangular(self.scale_tril, yt, lower=True)
return jnp.reshape(xt.T, original_shape)
def log_abs_det_jacobian(self, x, y, intermediates=None):
return jnp.broadcast_to(jnp.log(jnp.diagonal(self.scale_tril, axis1=-2, axis2=-1)).sum(-1),
jnp.shape(x)[:-1])
def forward_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])
def inverse_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])
class LowerCholeskyTransform(Transform):
domain = constraints.real_vector
codomain = constraints.lower_cholesky
def __call__(self, x):
n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)
diag = jnp.exp(x[..., -n:])
return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)
def _inverse(self, y):
z = matrix_to_tril_vec(y, diagonal=-1)
return jnp.concatenate([z, jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1))], axis=-1)
def log_abs_det_jacobian(self, x, y, intermediates=None):
# the jacobian is diagonal, so logdet is the sum of diagonal `exp` transform
n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
return x[..., -n:].sum(-1)
def forward_shape(self, shape):
return _matrix_forward_shape(shape)
def inverse_shape(self, shape):
return _matrix_inverse_shape(shape)
class OrderedTransform(Transform):
domain = constraints.real_vector
codomain = constraints.ordered_vector
def __call__(self, x):
z = jnp.concatenate([x[..., :1], jnp.exp(x[..., 1:])], axis=-1)
return jnp.cumsum(z, axis=-1)
def _inverse(self, y):
x = jnp.log(y[..., 1:] - y[..., :-1])
return jnp.concatenate([y[..., :1], x], axis=-1)
def log_abs_det_jacobian(self, x, y, intermediates=None):
return jnp.sum(x[..., 1:], -1)
class PermuteTransform(Transform):
    """Reorder the last dimension according to a fixed index permutation."""
    domain = constraints.real_vector
    codomain = constraints.real_vector

    def __init__(self, permutation):
        # permutation: integer index array applied to the last axis.
        self.permutation = permutation

    def __call__(self, x):
        return x[..., self.permutation]

    def _inverse(self, y):
        size = self.permutation.size
        # Build the inverse permutation by scattering arange(size) through
        # the forward permutation.
        # NOTE(review): jax.ops.index_update is a deprecated JAX API
        # (removed in newer releases); the modern equivalent is
        # zeros.at[self.permutation].set(jnp.arange(size)).
        permutation_inv = ops.index_update(jnp.zeros(size, dtype=jnp.result_type(int)),
                                           self.permutation,
                                           jnp.arange(size))
        return y[..., permutation_inv]

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # A permutation is volume preserving: |det J| == 1, so log is 0.
        return jnp.full(jnp.shape(x)[:-1], 0.)
class PowerTransform(Transform):
    """Bijection y = x ** exponent on the positive reals."""
    domain = constraints.positive
    codomain = constraints.positive

    def __init__(self, exponent):
        self.exponent = exponent

    def __call__(self, x):
        return jnp.power(x, self.exponent)

    def _inverse(self, y):
        return jnp.power(y, 1 / self.exponent)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # dy/dx = exponent * x**(exponent - 1) = exponent * y / x.
        derivative = self.exponent * y / x
        return jnp.log(jnp.abs(derivative))

    def forward_shape(self, shape):
        exponent_shape = getattr(self.exponent, "shape", ())
        return lax.broadcast_shapes(shape, exponent_shape)

    def inverse_shape(self, shape):
        exponent_shape = getattr(self.exponent, "shape", ())
        return lax.broadcast_shapes(shape, exponent_shape)
class SigmoidTransform(Transform):
    """Bijection from the real line to the open unit interval via the
    logistic sigmoid."""
    codomain = constraints.unit_interval

    def __call__(self, x):
        # _clipped_expit (defined elsewhere in this module) presumably
        # clips away exact 0/1 endpoints for numerical safety -- confirm.
        return _clipped_expit(x)

    def _inverse(self, y):
        return logit(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log sigmoid'(x) = -|x| - 2*log(1 + exp(-|x|)): the |x| form
        # avoids overflow of exp for large-magnitude inputs.
        x_abs = jnp.abs(x)
        return -x_abs - 2 * jnp.log1p(jnp.exp(-x_abs))
def _softplus_inv(y):
return jnp.log(-jnp.expm1(-y)) + y
class SoftplusTransform(Transform):
    """Bijection from the reals to the positive reals via
    y = softplus(x) = log(1 + exp(x))."""
    domain = constraints.real
    codomain = constraints.softplus_positive

    def __call__(self, x):
        return softplus(x)

    def _inverse(self, y):
        return _softplus_inv(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # d softplus(x)/dx = sigmoid(x), and log sigmoid(x) = -softplus(-x).
        return -softplus(-x)
class SoftplusLowerCholeskyTransform(Transform):
    """Map an unconstrained vector to a lower-triangular matrix whose
    diagonal is made positive with softplus (instead of exp)."""
    domain = constraints.real_vector
    codomain = constraints.softplus_lower_cholesky

    def __call__(self, x):
        # A vector of length n*(n+1)/2 encodes an n x n lower-triangular
        # matrix; recover n via the quadratic formula.
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)  # strict lower part
        diag = softplus(x[..., -n:])  # last n entries -> positive diagonal
        return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)

    def _inverse(self, y):
        z = matrix_to_tril_vec(y, diagonal=-1)
        diag = _softplus_inv(jnp.diagonal(y, axis1=-2, axis2=-1))
        return jnp.concatenate([z, diag], axis=-1)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # the jacobian is diagonal, so logdet is the sum of diagonal `exp` transform
        # Off-diagonal entries map identically; only the n diagonal entries
        # contribute, each with log softplus'(t) = -softplus(-t).
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        return -softplus(-x[..., -n:]).sum(-1)

    def forward_shape(self, shape):
        return _matrix_forward_shape(shape)

    def inverse_shape(self, shape):
        return _matrix_inverse_shape(shape)
class StickBreakingTransform(Transform):
    """Bijection from an unconstrained vector in R^K to a point on the
    (K+1)-dimensional simplex via stick breaking."""
    domain = constraints.real_vector
    codomain = constraints.simplex

    def __call__(self, x):
        # we shift x to obtain a balanced mapping (0, 0, ..., 0) -> (1/K, 1/K, ..., 1/K)
        x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))
        # convert to probabilities (relative to the remaining) of each fraction of the stick
        z = _clipped_expit(x)
        z1m_cumprod = jnp.cumprod(1 - z, axis=-1)
        # Append a final stick fraction of 1 ...
        pad_width = [(0, 0)] * x.ndim
        pad_width[-1] = (0, 1)
        z_padded = jnp.pad(z, pad_width, mode="constant", constant_values=1.)
        # ... and prepend a leading remaining-mass of 1, then multiply
        # elementwise to get the simplex coordinates.
        pad_width = [(0, 0)] * x.ndim
        pad_width[-1] = (1, 0)
        z1m_cumprod_shifted = jnp.pad(z1m_cumprod, pad_width, mode="constant", constant_values=1.)
        return z_padded * z1m_cumprod_shifted

    def _inverse(self, y):
        y_crop = y[..., :-1]
        # Remaining stick mass before each coordinate; clip to the
        # smallest positive float to avoid log(0) / division by zero.
        z1m_cumprod = jnp.clip(1 - jnp.cumsum(y_crop, axis=-1), a_min=jnp.finfo(y.dtype).tiny)
        # hence x = logit(z) = log(z / (1 - z)) = y[::-1] / z1m_cumprod
        x = jnp.log(y_crop / z1m_cumprod)
        return x + jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Ref: https://mc-stan.org/docs/2_19/reference-manual/simplex-transform-section.html
        # |det|(J) = Product(y * (1 - z))
        x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))
        z = jnp.clip(expit(x), a_min=jnp.finfo(x.dtype).tiny)
        # XXX we use the identity 1 - z = z * exp(-x) to not worry about
        # the case z ~ 1
        return jnp.sum(jnp.log(y[..., :-1] * z) - x, axis=-1)

    def forward_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        # The simplex has one more coordinate than the unconstrained vector.
        return shape[:-1] + (shape[-1] + 1,)

    def inverse_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return shape[:-1] + (shape[-1] - 1,)
class UnpackTransform(Transform):
    """Unpack a flat real vector into a pytree of arrays via `unpack_fn`."""
    domain = constraints.real_vector
    codomain = constraints.dependent

    def __init__(self, unpack_fn):
        # unpack_fn: callable mapping a 1-D array to a pytree of arrays.
        self.unpack_fn = unpack_fn

    def __call__(self, x):
        batch_shape = x.shape[:-1]
        if batch_shape:
            # vmap over a flattened batch, then restore the batch shape on
            # every leaf of the resulting pytree.
            unpacked = vmap(self.unpack_fn)(x.reshape((-1,) + x.shape[-1:]))
            return tree_map(lambda z: jnp.reshape(z, batch_shape + z.shape[1:]), unpacked)
        else:
            return self.unpack_fn(x)

    def _inverse(self, y):
        leading_dims = [v.shape[0] if jnp.ndim(v) > 0 else 0
                        for v in tree_flatten(y)[0]]
        d0 = leading_dims[0]
        not_scalar = d0 > 0 or len(leading_dims) > 1
        if not_scalar and all(d == d0 for d in leading_dims[1:]):
            # All leaves share a common leading dimension, which may be a
            # batch dimension that ravel_pytree cannot undo per-sample.
            warnings.warn("UnpackTransform.inv might lead to an unexpected behavior because it"
                          " cannot transform a batch of unpacked arrays.")
        return ravel_pytree(y)[0]

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Unpacking only reorganizes values: volume preserving, log|det| = 0.
        return jnp.zeros(jnp.shape(x)[:-1])

    def forward_shape(self, shape):
        raise NotImplementedError

    def inverse_shape(self, shape):
        raise NotImplementedError
##########################################################
# CONSTRAINT_REGISTRY
##########################################################
class ConstraintRegistry(object):
    """Registry mapping constraint types to transform factories."""

    def __init__(self):
        self._registry = {}

    def register(self, constraint, factory=None):
        """Register `factory` for `constraint`; usable as a decorator."""
        if factory is None:
            # Decorator form: @registry.register(constraint)
            return lambda factory: self.register(constraint, factory)
        # Instances are keyed by their type.
        if isinstance(constraint, constraints.Constraint):
            constraint = type(constraint)
        self._registry[constraint] = factory

    def __call__(self, constraint):
        """Look up and invoke the factory for `constraint`."""
        try:
            factory = self._registry[type(constraint)]
        except KeyError as err:
            raise NotImplementedError from err
        return factory(constraint)
# Global registry: biject_to(constraint) returns a bijective transform
# from unconstrained reals onto the constrained space.
biject_to = ConstraintRegistry()


@biject_to.register(constraints.corr_cholesky)
def _transform_to_corr_cholesky(constraint):
    return CorrCholeskyTransform()


@biject_to.register(constraints.corr_matrix)
def _transform_to_corr_matrix(constraint):
    # Map to a correlation Cholesky factor, then recover the full
    # correlation matrix with the inverse Cholesky transform.
    return ComposeTransform([CorrCholeskyTransform(),
                             CorrMatrixCholeskyTransform().inv])


@biject_to.register(constraints.greater_than)
def _transform_to_greater_than(constraint):
    if constraint is constraints.positive:
        # Common case x > 0: a plain exp suffices.
        return ExpTransform()
    return ComposeTransform([ExpTransform(),
                             AffineTransform(constraint.lower_bound, 1,
                                             domain=constraints.positive)])


@biject_to.register(constraints.less_than)
def _transform_to_less_than(constraint):
    # exp yields a positive value; the affine map with scale -1 flips it
    # below the upper bound.
    return ComposeTransform([ExpTransform(),
                             AffineTransform(constraint.upper_bound, -1,
                                             domain=constraints.positive)])


@biject_to.register(constraints.independent)
def _biject_to_independent(constraint):
    return IndependentTransform(biject_to(constraint.base_constraint),
                                constraint.reinterpreted_batch_ndims)


@biject_to.register(constraints.interval)
def _transform_to_interval(constraint):
    if constraint is constraints.unit_interval:
        return SigmoidTransform()
    # Sigmoid to (0, 1), then rescale/shift into (lower, upper).
    scale = constraint.upper_bound - constraint.lower_bound
    return ComposeTransform([SigmoidTransform(),
                             AffineTransform(constraint.lower_bound, scale,
                                             domain=constraints.unit_interval)])


@biject_to.register(constraints.lower_cholesky)
def _transform_to_lower_cholesky(constraint):
    return LowerCholeskyTransform()


@biject_to.register(constraints.ordered_vector)
def _transform_to_ordered_vector(constraint):
    return OrderedTransform()


@biject_to.register(constraints.positive_definite)
def _transform_to_positive_definite(constraint):
    # Unconstrained -> lower Cholesky factor -> positive-definite matrix.
    return ComposeTransform([LowerCholeskyTransform(), CholeskyTransform().inv])


@biject_to.register(constraints.positive_ordered_vector)
def _transform_to_positive_ordered_vector(constraint):
    return ComposeTransform([OrderedTransform(), ExpTransform()])


@biject_to.register(constraints.real)
def _transform_to_real(constraint):
    return IdentityTransform()


@biject_to.register(constraints.softplus_positive)
def _transform_to_softplus_positive(constraint):
    return SoftplusTransform()


@biject_to.register(constraints.softplus_lower_cholesky)
def _transform_to_softplus_lower_cholesky(constraint):
    return SoftplusLowerCholeskyTransform()


@biject_to.register(constraints.simplex)
def _transform_to_simplex(constraint):
    return StickBreakingTransform()
| true | true |
f71af30647e8b3464f41ad8052f431bd92a2243e | 2,623 | py | Python | christmas_lights/Sprite1d.py | rec/christmas_lights | da72b3941b097b6854ba1ba999c0a6cf9b029b0f | [
"MIT"
] | 1 | 2019-05-26T15:10:04.000Z | 2019-05-26T15:10:04.000Z | christmas_lights/Sprite1d.py | rec/christmas_lights | da72b3941b097b6854ba1ba999c0a6cf9b029b0f | [
"MIT"
] | null | null | null | christmas_lights/Sprite1d.py | rec/christmas_lights | da72b3941b097b6854ba1ba999c0a6cf9b029b0f | [
"MIT"
] | null | null | null | import numbers, random
class Sprite1d:
    """A one-dimensional sprite with subpixel positioning.

    The sprite's ``icon`` (a sequence of pixel colors) is additively
    blended into ``color_list`` at a fractional position in [0, 1);
    ``move`` integrates speed/acceleration per frame and ``bounce``
    reflects the sprite off the ``bound`` interval.
    """

    def __init__(self, icon, color_list, speed=0, acceleration=0, bound=(0, 1),
                 position=0, center=None):
        self.color_list = color_list
        # A numpy color buffer supports vectorized blending.
        if hasattr(color_list, 'dtype'):
            self._combine = self._combine_numpy
        self.icon = icon
        self.speed = to_number(speed)
        self.acceleration = to_number(acceleration)
        self.bound = bound
        self.position = to_number(position)
        self.center = int(len(self.icon) / 2) if center is None else center
        # NOTE(review): fps starts at 0 and must be set externally before
        # calling move(), otherwise move() divides by zero -- confirm.
        self.fps = 0

    def display(self):
        # Handle subpixel positioning: split the scaled position into a
        # whole pixel index and a fractional remainder, then draw the
        # icon twice, weighted by (1 - fraction) and fraction.
        whole, fraction = divmod(self.position * len(self.color_list), 1)
        left = int(whole) - self.center
        right = left + len(self.icon)
        self._add(left, right, 1 - fraction)
        if fraction:
            self._add(left + 1, right + 1, fraction)

    def move(self, amt):
        """Advance position by `amt` frames' worth of motion."""
        self.position += amt * (self.speed + self.acceleration / 2) / self.fps
        self.speed += self.acceleration

    def bounce(self):
        """Reflect position and speed off either edge of `bound`."""
        left, right = self.bound
        if self.position < left and self.speed < 0:
            self.position = left + (left - self.position)
            self.speed = -self.speed
        if self.position >= right and self.speed > 0:
            self.position = right - (self.position - right)
            self.speed = -self.speed

    def _combine_numpy(self, left, right, ratio, pixels):
        # Vectorized additive blend for numpy buffers.
        self.color_list[left:right] += ratio * pixels

    def _combine(self, left, right, ratio, pixels):
        for i in range(left, right):
            color = self.color_list[i]
            pixel = pixels[i - left]
            # BUG FIX: the blended color was previously computed into a
            # local and discarded; store it back into the buffer.
            self.color_list[i] = tuple(
                c + ratio * p for c, p in zip(color, pixel))

    def _add(self, left, right, ratio):
        pixels = self.icon
        # Is the sprite visible at all?
        if right < 0 or left >= len(self.color_list):
            return
        if left < 0:
            # It's partly off the left side.
            pixels = pixels[-left:]
            left = 0
        if right >= len(self.color_list):
            # It's partly off the right side.
            # NOTE(review): the slice keeps len - right - 1 pixels while
            # right becomes len - 1; looks like an off-by-one, but kept
            # as-is to preserve the existing rendering behavior.
            pixels = pixels[:len(self.color_list) - right - 1]
            right = len(self.color_list) - 1
        self._combine(left, right, ratio, pixels)
def to_number(x):
    """Coerce `x` to a number; strings of the form 'rand(lo,hi)' produce
    a uniform random draw from [lo, hi]."""
    if isinstance(x, numbers.Number):
        return x
    if not x.startswith('rand('):
        raise ValueError("Don't understand number '%s'" % x)
    lo, hi = map(float, x[5:-1].split(','))
    return random.uniform(lo, hi)
| 33.202532 | 79 | 0.576439 | import numbers, random
class Sprite1d:
def __init__(self, icon, color_list, speed=0, acceleration=0, bound=(0, 1),
position=0, center=None):
self.color_list = color_list
if hasattr(color_list, 'dtype'):
self._combine = self._combine_numpy
self.icon = icon
self.speed = to_number(speed)
self.acceleration = to_number(acceleration)
self.bound = bound
self.position = to_number(position)
self.center = int(len(self.icon) / 2) if center is None else center
self.fps = 0
def display(self):
whole, fraction = divmod(self.position * len(self.color_list), 1)
left = int(whole) - self.center
right = left + len(self.icon)
self._add(left, right, 1 - fraction)
if fraction:
self._add(left + 1, right + 1, fraction)
def move(self, amt):
self.position += amt * (self.speed + self.acceleration / 2) / self.fps
self.speed += self.acceleration
def bounce(self):
left, right = self.bound
if self.position < left and self.speed < 0:
self.position = left + (left - self.position)
self.speed = -self.speed
if self.position >= right and self.speed > 0:
self.position = right - (self.position - right)
self.speed = -self.speed
def _combine_numpy(self, left, right, ratio, pixels):
self.color_list[left:right] += ratio * pixels
def _combine(self, left, right, ratio, pixels):
for i in range(left, right):
color = self.color_list[i]
pixel = pixels[i - left]
color = tuple(c + ratio * p for c, p in zip(color, pixel))
def _add(self, left, right, ratio):
pixels = self.icon
if right < 0 or left >= len(self.color_list):
return
if left < 0:
pixels = pixels[-left:]
left = 0
if right >= len(self.color_list):
# It's partly off the right side.
pixels = pixels[:len(self.color_list) - right - 1]
right = len(self.color_list) - 1
self._combine(left, right, ratio, pixels)
def to_number(x):
if isinstance(x, numbers.Number):
return x
if not x.startswith('rand('):
raise ValueError("Don't understand number '%s'" % x)
lo, hi = (float(i) for i in x[5:-1].split(','))
return random.uniform(lo, hi)
| true | true |
f71af32c0f552806810683bb603031e425d1a879 | 95 | py | Python | core/response/__init__.py | ryanolee/pager-duty-sync | 1fd88634e461b5db647d856bc6b59f990944685e | [
"MIT"
] | null | null | null | core/response/__init__.py | ryanolee/pager-duty-sync | 1fd88634e461b5db647d856bc6b59f990944685e | [
"MIT"
] | 2 | 2020-09-27T18:19:17.000Z | 2021-06-29T09:21:04.000Z | core/response/__init__.py | ryanolee/pager-duty-sync | 1fd88634e461b5db647d856bc6b59f990944685e | [
"MIT"
] | null | null | null | from .response import get_response
from .lambda_proxy_response import get_lambda_proxy_response | 47.5 | 60 | 0.905263 | from .response import get_response
from .lambda_proxy_response import get_lambda_proxy_response | true | true |
f71af42134cc4cc0f0fd59f5b0ef650eed03bbb9 | 2,051 | py | Python | pagarmecoreapi/models/list_customers_response.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 6 | 2021-09-02T19:55:04.000Z | 2022-03-16T14:06:15.000Z | pagarmecoreapi/models/list_customers_response.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 2 | 2021-10-11T22:48:15.000Z | 2022-01-24T18:24:23.000Z | pagarmecoreapi/models/list_customers_response.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 2 | 2021-09-12T21:43:32.000Z | 2022-03-07T16:58:54.000Z | # -*- coding: utf-8 -*-
"""
pagarmecoreapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import pagarmecoreapi.models.get_customer_response
import pagarmecoreapi.models.paging_response
class ListCustomersResponse(object):
    """Implementation of the 'ListCustomersResponse' model.

    Response for listing the customers.

    Attributes:
        data (list of GetCustomerResponse): The customer objects.
        paging (PagingResponse): Paging information.
    """

    # Mapping from Model property names to API property names.
    _names = {
        "data": 'data',
        "paging": 'paging'
    }

    def __init__(self,
                 data=None,
                 paging=None):
        """Constructor for the ListCustomersResponse class."""
        self.data = data
        self.paging = paging

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dict): A dictionary representation of the object as
                obtained from the deserialization of the server's response.
                Keys MUST match property names in the API description.

        Returns:
            ListCustomersResponse: An instance of this class, or None when
            `dictionary` is None.
        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary.
        # Idiom fix: compare against None with `is not`, not `!=`.
        data = None
        if dictionary.get('data') is not None:
            data = [
                pagarmecoreapi.models.get_customer_response
                .GetCustomerResponse.from_dictionary(structure)
                for structure in dictionary.get('data')
            ]
        paging = (pagarmecoreapi.models.paging_response.PagingResponse
                  .from_dictionary(dictionary.get('paging'))
                  if dictionary.get('paging') else None)

        return cls(data,
                   paging)
| 29.3 | 150 | 0.611409 |
import pagarmecoreapi.models.get_customer_response
import pagarmecoreapi.models.paging_response
class ListCustomersResponse(object):
_names = {
"data":'data',
"paging":'paging'
}
def __init__(self,
data=None,
paging=None):
self.data = data
self.paging = paging
@classmethod
def from_dictionary(cls,
dictionary):
if dictionary is None:
return None
data = None
if dictionary.get('data') != None:
data = list()
for structure in dictionary.get('data'):
data.append(pagarmecoreapi.models.get_customer_response.GetCustomerResponse.from_dictionary(structure))
paging = pagarmecoreapi.models.paging_response.PagingResponse.from_dictionary(dictionary.get('paging')) if dictionary.get('paging') else None
return cls(data,
paging)
| true | true |
f71af554f8fb983c51ebf2f5979bbb45ab0484fa | 1,434 | py | Python | services/web/server/src/simcore_service_webserver/log.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | services/web/server/src/simcore_service_webserver/log.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | services/web/server/src/simcore_service_webserver/log.py | odeimaiz/osparc-simcore | 71c2fc58dcfe067487dcd75cb70298a4d6237e97 | [
"MIT"
] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z | """ Configuration and utilities for service logging
"""
import logging
from typing import Optional, Union
from aiodebug import log_slow_callbacks
from aiohttp.log import access_logger
from servicelib.logging_utils import config_all_loggers
# One severity step used to quiet noisy third-party loggers.
LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR


def setup_logging(*, level: Union[str, int], slow_duration: Optional[float] = None):
    """Configure root, aiohttp-access and noisy third-party loggers.

    `level` drives the service loggers; chatty libraries are raised one
    severity step (capped at CRITICAL, floored at WARNING).  When
    `slow_duration` is given, aiodebug logs event-loop callbacks that
    block longer than that many seconds.
    """
    # Service and root log level.
    logging.basicConfig(level=level)
    logging.root.setLevel(level)
    config_all_loggers()

    # aiohttp access log follows the service level.
    access_logger.setLevel(level)

    # Keep mostly quiet the noisy loggers.
    quiet_level: int = max(
        min(logging.root.level + LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING
    )
    for noisy_name in ("engineio", "openapi_spec_validator",
                       "sqlalchemy", "sqlalchemy.engine"):
        logging.getLogger(noisy_name).setLevel(quiet_level)

    if slow_duration:
        # NOTE: every task blocking > slow_duration secs is considered
        # slow and logged as a warning by aiodebug.
        log_slow_callbacks.enable(abs(slow_duration))
def test_logger_propagation(logger: logging.Logger):
msg = f"TESTING %s log with {logger}"
logger.critical(msg, "critical")
logger.error(msg, "error")
logger.info(msg, "info")
logger.warning(msg, "warning")
logger.debug(msg, "debug")
| 30.510638 | 111 | 0.739191 | import logging
from typing import Optional, Union
from aiodebug import log_slow_callbacks
from aiohttp.log import access_logger
from servicelib.logging_utils import config_all_loggers
LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR
def setup_logging(*, level: Union[str, int], slow_duration: Optional[float] = None):
logging.basicConfig(level=level)
logging.root.setLevel(level)
config_all_loggers()
access_logger.setLevel(level)
quiet_level: int = max(
min(logging.root.level + LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING
)
logging.getLogger("engineio").setLevel(quiet_level)
logging.getLogger("openapi_spec_validator").setLevel(quiet_level)
logging.getLogger("sqlalchemy").setLevel(quiet_level)
logging.getLogger("sqlalchemy.engine").setLevel(quiet_level)
if slow_duration:
log_slow_callbacks.enable(abs(slow_duration))
def test_logger_propagation(logger: logging.Logger):
msg = f"TESTING %s log with {logger}"
logger.critical(msg, "critical")
logger.error(msg, "error")
logger.info(msg, "info")
logger.warning(msg, "warning")
logger.debug(msg, "debug")
| true | true |
f71af6b92295f3372a61cc87a1cb4e7b3810469d | 336 | py | Python | Algorithms/Sort Array By Parity.py | KushRabadia/Leetcode | f6af5bf0b9ef8daf9870570b52012297128aa9e1 | [
"MIT"
] | null | null | null | Algorithms/Sort Array By Parity.py | KushRabadia/Leetcode | f6af5bf0b9ef8daf9870570b52012297128aa9e1 | [
"MIT"
] | null | null | null | Algorithms/Sort Array By Parity.py | KushRabadia/Leetcode | f6af5bf0b9ef8daf9870570b52012297128aa9e1 | [
"MIT"
] | null | null | null | class Solution(object):
def sortArrayByParity(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
result = []
for i in A:
if i%2 == 0:
result.insert(0,i)
else:
result.append(i)
return result
| 21 | 35 | 0.383929 | class Solution(object):
def sortArrayByParity(self, A):
result = []
for i in A:
if i%2 == 0:
result.insert(0,i)
else:
result.append(i)
return result
| true | true |
f71af7240c9eccec7c0e6d401d254719234a7b2b | 1,119 | py | Python | pyunity/examples/example6/__init__.py | rayzchen/PyUnity | 8ed436eca7a84f05190c1fa275c58da5c6059926 | [
"MIT"
] | null | null | null | pyunity/examples/example6/__init__.py | rayzchen/PyUnity | 8ed436eca7a84f05190c1fa275c58da5c6059926 | [
"MIT"
] | null | null | null | pyunity/examples/example6/__init__.py | rayzchen/PyUnity | 8ed436eca7a84f05190c1fa275c58da5c6059926 | [
"MIT"
] | null | null | null | # Copyright (c) 2020-2022 The PyUnity Team
# This file is licensed under the MIT License.
# See https://docs.pyunity.x10.bz/en/latest/license.html
from pyunity import Behaviour, GameObject, SceneManager, Material, RGB, Mesh, Vector3, MeshRenderer, WaitForSeconds
class Switch(Behaviour):
    """Behaviour that switches to the second scene after a short delay."""
    async def Start(self):
        # Wait three seconds, then load scene index 1 (the second scene).
        await WaitForSeconds(3)
        SceneManager.LoadSceneByIndex(1)
def main():
    """Build two one-cube scenes and start the first.

    The red cube carries a Switch behaviour that later jumps to the
    blue-cube scene.
    """
    scene_a = SceneManager.AddScene("Scene")
    scene_b = SceneManager.AddScene("Scene 2")
    for scene in (scene_a, scene_b):
        scene.mainCamera.transform.localPosition = Vector3(0, 0, -10)

    red_cube = GameObject("Cube")
    red_renderer = red_cube.AddComponent(MeshRenderer)
    red_renderer.mesh = Mesh.cube(2)
    red_renderer.mat = Material(RGB(255, 0, 0))
    red_cube.AddComponent(Switch)
    scene_a.Add(red_cube)

    blue_cube = GameObject("Cube 2")
    blue_renderer = blue_cube.AddComponent(MeshRenderer)
    blue_renderer.mesh = Mesh.cube(2)
    blue_renderer.mat = Material(RGB(0, 0, 255))
    scene_b.Add(blue_cube)

    SceneManager.LoadScene(scene_a)


if __name__ == "__main__":
    main()
| 31.971429 | 116 | 0.680965 |
from pyunity import Behaviour, GameObject, SceneManager, Material, RGB, Mesh, Vector3, MeshRenderer, WaitForSeconds
class Switch(Behaviour):
async def Start(self):
await WaitForSeconds(3)
SceneManager.LoadSceneByIndex(1)
def main():
scene = SceneManager.AddScene("Scene")
scene2 = SceneManager.AddScene("Scene 2")
scene.mainCamera.transform.localPosition = Vector3(0, 0, -10)
scene2.mainCamera.transform.localPosition = Vector3(0, 0, -10)
cube = GameObject("Cube")
renderer = cube.AddComponent(MeshRenderer)
renderer.mesh = Mesh.cube(2)
renderer.mat = Material(RGB(255, 0, 0))
cube.AddComponent(Switch)
scene.Add(cube)
cube2 = GameObject("Cube 2")
renderer = cube2.AddComponent(MeshRenderer)
renderer.mesh = Mesh.cube(2)
renderer.mat = Material(RGB(0, 0, 255))
scene2.Add(cube2)
SceneManager.LoadScene(scene)
if __name__ == "__main__":
main()
| true | true |
f71af7bed48f507c7621116150709b5c2b26365b | 2,239 | py | Python | setup.py | fmarco76/DiscourseSSO | 97d3318c6ebe9cb10af3d5aeaff4da1b60472ff8 | [
"Apache-2.0"
] | 14 | 2015-06-03T09:32:16.000Z | 2021-04-28T13:39:40.000Z | setup.py | fmarco76/DiscourseSSO | 97d3318c6ebe9cb10af3d5aeaff4da1b60472ff8 | [
"Apache-2.0"
] | 3 | 2015-06-03T09:45:04.000Z | 2018-02-21T07:25:47.000Z | setup.py | fmarco76/DiscourseSSO | 97d3318c6ebe9cb10af3d5aeaff4da1b60472ff8 | [
"Apache-2.0"
] | 5 | 2015-05-29T11:23:20.000Z | 2019-09-15T23:54:48.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Read and return the text of a file located next to this script.

    Path components in `names` are joined onto this file's directory;
    the encoding may be overridden via the `encoding` keyword
    (default 'utf8').
    """
    path = join(dirname(__file__), *names)
    # Use a context manager so the file handle is closed promptly
    # (the previous version left the handle open until GC).
    with io.open(path, encoding=kwargs.get('encoding', 'utf8')) as stream:
        return stream.read()
# Package metadata; long_description is README + CHANGELOG with
# cross-reference roles downgraded to plain literals.
setup(
    name='DiscourseSSO',
    version='0.1.0',
    license='Apache2.0',
    description='SSO Discourse Application to allow SAML authentication',
    long_description='%s\n%s' % (read('README.rst'), re.sub(':obj:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))),
    author='Marco Fargetta',
    author_email='marco.fargetta@ct.infn.it',
    url='https://github.com/fmarco76/DiscourseSSO',
    # packages=find_packages(exclude=['tests*']),
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Utilities',
    ],
    keywords=[
        'SAML', 'discourse'
    ],
    install_requires=[
        'Flask>=0.10.1'
    ],
    extras_require={
        # eg: 'rst': ['docutils>=0.11'],
    },
    entry_points={
        'console_scripts': [
            # NOTE(review): 'discroursesso' looks like a typo of
            # 'discoursesso', but renaming would change the installed
            # command users invoke -- confirm before fixing.
            'discroursesso = sso.__main__:main'
        ]
    },
) | 31.985714 | 113 | 0.618133 |
from __future__ import absolute_import, print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='DiscourseSSO',
version='0.1.0',
license='Apache2.0',
description='SSO Discourse Application to allow SAML authentication',
long_description='%s\n%s' % (read('README.rst'), re.sub(':obj:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))),
author='Marco Fargetta',
author_email='marco.fargetta@ct.infn.it',
url='https://github.com/fmarco76/DiscourseSSO',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
],
keywords=[
'SAML', 'discourse'
],
install_requires=[
'Flask>=0.10.1'
],
extras_require={
},
entry_points={
'console_scripts': [
'discroursesso = sso.__main__:main'
]
},
) | true | true |
f71af8d2ec295f87bf614c74aa31266459b19da0 | 3,373 | py | Python | stactools_cgls_lc100/stactools/cgls_lc100/stac.py | jonas-eberle/stactools | ce82450343caf71c08f04d2a4a16590285585735 | [
"Apache-2.0"
] | null | null | null | stactools_cgls_lc100/stactools/cgls_lc100/stac.py | jonas-eberle/stactools | ce82450343caf71c08f04d2a4a16590285585735 | [
"Apache-2.0"
] | 2 | 2021-08-12T17:54:36.000Z | 2021-08-12T18:21:28.000Z | src/stactools/cgls_lc100/stac.py | stactools-packages/stactools-cgls_lc100 | 9cc863336cd946d0e8651c1a3670cfc9c866f54c | [
"Apache-2.0"
] | null | null | null | import os
import pystac
from pystac.utils import str_to_datetime
import rasterio as rio
from shapely.geometry import box, mapping, shape
from stactools.cgls_lc100.constants import (
PROVIDER_NAME, ITEM_TIF_IMAGE_NAME, DISCRETE_CLASSIFICATION_CLASS_NAMES,
DISCRETE_CLASSIFICATION_CLASS_PALETTE)
def create_item(tif_href, additional_providers=None):
    """Creates a STAC Item from Copernicus Global Land Cover Layers data.

    Args:
        tif_href (str): The href to the metadata for this tif.
            This function will read the metadata file for information to place in
            the STAC item.
        additional_providers (list, optional): Extra providers appended
            after the Copernicus provider.

    Returns:
        pystac.Item: A STAC Item representing this Copernicus Global Land Cover Layers data.
    """
    with rio.open(tif_href) as f:
        tags = f.tags()
        band_tags = f.tags(1)  # tags of band 1, the single data band
        bounds = f.bounds

    # Item id: the tif file name without its extension.
    item_id = os.path.basename(tif_href).replace('.tif', '')

    # Bounds: round-trip the raster bbox through a shapely box to get a
    # GeoJSON geometry and its bounding box.
    geom = mapping(box(bounds.left, bounds.bottom, bounds.right, bounds.top))
    bounds = shape(geom).bounds

    start_dt = str_to_datetime(tags.pop('time_coverage_start'))
    end_dt = str_to_datetime(tags.pop('time_coverage_end'))
    file_creation_dt = str_to_datetime(tags.pop('file_creation'))

    # NOTE(review): datetime objects (not ISO strings) are placed in
    # `properties` here -- confirm downstream serialization handles this.
    item = pystac.Item(id=item_id,
                       geometry=geom,
                       bbox=bounds,
                       datetime=None,
                       properties={
                           'start_datetime':
                           start_dt,
                           'end_datetime':
                           end_dt,
                           'discrete_classification_class_names':
                           DISCRETE_CLASSIFICATION_CLASS_NAMES,
                           'discrete_classification_class_palette':
                           DISCRETE_CLASSIFICATION_CLASS_PALETTE
                       })

    # Common metadata; the 'doi' tag doubles as the provider URL.
    copernicus_provider = pystac.Provider(name=PROVIDER_NAME,
                                          url=(tags.pop('doi')),
                                          roles=['producer', 'licensor'])

    item.common_metadata.providers = [copernicus_provider]
    if additional_providers is not None:
        item.common_metadata.providers.extend(additional_providers)

    item.common_metadata.start_datetime = start_dt
    item.common_metadata.end_datetime = end_dt
    item.common_metadata.created = file_creation_dt
    item.common_metadata.description = tags.pop('Info')
    item.common_metadata.platform = tags.pop('platform')
    item.common_metadata.title = tags.pop('title')

    # proj: parse the EPSG code out of e.g. "WGS84 (EPSG:4326)".
    item.ext.enable('projection')
    item.ext.projection.epsg = int(
        tags.pop('delivered_product_crs').replace('WGS84 (EPSG:',
                                                  '').replace(')', ''))

    # Extra fields: every remaining tif tag is carried over verbatim.
    for k, v in tags.items():
        item.extra_fields[k] = v

    # Bands
    long_name = band_tags.pop('long_name')
    band = pystac.extensions.eo.Band.create(
        name=long_name,
        common_name=band_tags.pop('short_name'),
        description=long_name)
    item.ext.enable('eo')
    item.ext.eo.bands = [band]

    # Tif asset pointing back at the source file.
    item.add_asset(
        ITEM_TIF_IMAGE_NAME,
        pystac.Asset(href=tif_href,
                     media_type=pystac.MediaType.TIFF,
                     roles=['data'],
                     title="tif image"))

    return item
| 34.070707 | 92 | 0.602728 | import os
import pystac
from pystac.utils import str_to_datetime
import rasterio as rio
from shapely.geometry import box, mapping, shape
from stactools.cgls_lc100.constants import (
PROVIDER_NAME, ITEM_TIF_IMAGE_NAME, DISCRETE_CLASSIFICATION_CLASS_NAMES,
DISCRETE_CLASSIFICATION_CLASS_PALETTE)
def create_item(tif_href, additional_providers=None):
with rio.open(tif_href) as f:
tags = f.tags()
band_tags = f.tags(1)
bounds = f.bounds
item_id = os.path.basename(tif_href).replace('.tif', '')
geom = mapping(box(bounds.left, bounds.bottom, bounds.right, bounds.top))
bounds = shape(geom).bounds
start_dt = str_to_datetime(tags.pop('time_coverage_start'))
end_dt = str_to_datetime(tags.pop('time_coverage_end'))
file_creation_dt = str_to_datetime(tags.pop('file_creation'))
item = pystac.Item(id=item_id,
geometry=geom,
bbox=bounds,
datetime=None,
properties={
'start_datetime':
start_dt,
'end_datetime':
end_dt,
'discrete_classification_class_names':
DISCRETE_CLASSIFICATION_CLASS_NAMES,
'discrete_classification_class_palette':
DISCRETE_CLASSIFICATION_CLASS_PALETTE
})
copernicus_provider = pystac.Provider(name=PROVIDER_NAME,
url=(tags.pop('doi')),
roles=['producer', 'licensor'])
item.common_metadata.providers = [copernicus_provider]
if additional_providers is not None:
item.common_metadata.providers.extend(additional_providers)
item.common_metadata.start_datetime = start_dt
item.common_metadata.end_datetime = end_dt
item.common_metadata.created = file_creation_dt
item.common_metadata.description = tags.pop('Info')
item.common_metadata.platform = tags.pop('platform')
item.common_metadata.title = tags.pop('title')
item.ext.enable('projection')
item.ext.projection.epsg = int(
tags.pop('delivered_product_crs').replace('WGS84 (EPSG:',
'').replace(')', ''))
for k, v in tags.items():
item.extra_fields[k] = v
long_name = band_tags.pop('long_name')
band = pystac.extensions.eo.Band.create(
name=long_name,
common_name=band_tags.pop('short_name'),
description=long_name)
item.ext.enable('eo')
item.ext.eo.bands = [band]
item.add_asset(
ITEM_TIF_IMAGE_NAME,
pystac.Asset(href=tif_href,
media_type=pystac.MediaType.TIFF,
roles=['data'],
title="tif image"))
return item
| true | true |
f71af958c9c9d5d07c4709793698bfeea578307a | 2,917 | py | Python | tests/unit/test_requester.py | HKLM/sync-connect-sdk | 16ec0fecd31042970ee29146011250a74f4742ae | [
"MIT"
] | null | null | null | tests/unit/test_requester.py | HKLM/sync-connect-sdk | 16ec0fecd31042970ee29146011250a74f4742ae | [
"MIT"
] | null | null | null | tests/unit/test_requester.py | HKLM/sync-connect-sdk | 16ec0fecd31042970ee29146011250a74f4742ae | [
"MIT"
] | null | null | null | import syncconnect
import responses
import unittest
class TestRequester(unittest.TestCase):
    """Tests for the syncconnect requester's HTTP status -> exception mapping.

    Each test queues a fake GET response with the `responses` library and
    asserts that ``syncconnect.requester.call`` maps the HTTP status code
    to the expected SDK exception.
    """

    EXPECTED = 'expected'
    URL = 'http://ford.url'

    def queue(self, status_code, **kwargs):
        """Queue a fake response with the passed status code.

        Keyword arguments, if any, become the JSON body; otherwise the body
        defaults to ``{'message': EXPECTED}``.
        """
        json = kwargs if kwargs else {'message': self.EXPECTED}
        responses.add('GET', self.URL, status=status_code, json=json)

    def check(self, exception):
        """Assert a GET raises `exception` with EXPECTED in its message."""
        # assertRaisesRegexp is a deprecated alias (removed in Python 3.12).
        self.assertRaisesRegex(
            exception,
            self.EXPECTED,
            syncconnect.requester.call,
            'GET',
            self.URL)

    @responses.activate
    def test_user_agent(self):
        self.queue(200)
        syncconnect.requester.call('GET', self.URL)
        self.assertEqual(
            responses.calls[0].request.headers['User-Agent'],
            'fordpass-na/353 CFNetwork/1121.2.2 Darwin/19.3.0',
        )

    @responses.activate
    def test_oauth_error(self):
        self.queue(401, error_description='unauthorized')
        # Use assertRaises instead of a bare try/except: the old form
        # silently passed when no exception was raised at all.
        with self.assertRaises(syncconnect.AuthenticationException) as ctx:
            syncconnect.requester.call('GET', self.URL)
        self.assertEqual(ctx.exception.message, 'unauthorized')

    @responses.activate
    def test_unknown_error(self):
        self.queue(401, error_description='unknown error')
        with self.assertRaises(syncconnect.AuthenticationException) as ctx:
            syncconnect.requester.call('GET', self.URL)
        self.assertEqual(ctx.exception.message, 'unknown error')

    @responses.activate
    def test_400(self):
        self.queue(400)
        self.check(syncconnect.ValidationException)

    @responses.activate
    def test_401(self):
        self.queue(401)
        self.check(syncconnect.AuthenticationException)

    @responses.activate
    def test_403(self):
        self.queue(403)
        self.check(syncconnect.PermissionException)

    @responses.activate
    def test_404(self):
        self.queue(404)
        self.check(syncconnect.ResourceNotFoundException)

    @responses.activate
    def test_429(self):
        # This test used to be defined twice; the duplicate shadowed it.
        self.queue(429)
        self.check(syncconnect.RateLimitingException)

    @responses.activate
    def test_500(self):
        self.queue(500)
        self.check(syncconnect.ServerException)

    @responses.activate
    def test_504(self):
        responses.add('GET', self.URL, status=504, json={
            'error': 'some error', 'message': self.EXPECTED})
        self.check(syncconnect.GatewayTimeoutException)

    @responses.activate
    def test_other(self):
        self.queue(503)
        with self.assertRaises(syncconnect.SyncException) as se:
            syncconnect.requester.call('GET', self.URL)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(se.exception.message, 'Unexpected error')
| 28.881188 | 71 | 0.637299 | import syncconnect
import responses
import unittest
class TestRequester(unittest.TestCase):
    """Tests for the syncconnect requester's HTTP status -> exception mapping.

    Each test queues a fake GET response (via the `responses` library) and
    asserts that ``syncconnect.requester.call`` maps the status code to the
    expected SDK exception.
    """

    # Expected message/regex used by most queued bodies and assertions.
    EXPECTED = 'expected'
    # Fake endpoint registered with `responses`; never hit over the network.
    URL = 'http://ford.url'

    def queue(self, status_code, **kwargs):
        """ queue fake responses with passed status code """
        # kwargs, when given, become the JSON body verbatim; otherwise the
        # body defaults to {'message': EXPECTED}.
        if not kwargs:
            json = {'message': self.EXPECTED}
        else:
            json = kwargs
        responses.add('GET', self.URL, status=status_code, json=json)

    def check(self, exception):
        """Assert a GET raises `exception` whose message matches EXPECTED."""
        # NOTE(review): assertRaisesRegexp is a deprecated alias of
        # assertRaisesRegex (removed in Python 3.12).
        self.assertRaisesRegexp(
            exception,
            self.EXPECTED,
            syncconnect.requester.call,
            'GET',
            self.URL)

    @responses.activate
    def test_user_agent(self):
        # The requester must impersonate the FordPass mobile client.
        self.queue(200)
        syncconnect.requester.call('GET', self.URL)
        self.assertEqual(
            responses.calls[0].request.headers['User-Agent'],
            'fordpass-na/353 CFNetwork/1121.2.2 Darwin/19.3.0',
        )

    @responses.activate
    def test_oauth_error(self):
        # NOTE(review): if no exception is raised this test silently passes;
        # an assertRaises context manager would be stricter.
        self.queue(401, error_description='unauthorized')
        try:
            syncconnect.requester.call('GET', self.URL)
        except syncconnect.AuthenticationException as err:
            self.assertEqual(err.message, 'unauthorized')

    @responses.activate
    def test_unknown_error(self):
        # NOTE(review): same silent-pass caveat as test_oauth_error.
        self.queue(401, error_description='unknown error')
        try:
            syncconnect.requester.call('GET', self.URL)
        except syncconnect.AuthenticationException as err:
            self.assertEqual(err.message, 'unknown error')

    @responses.activate
    def test_400(self):
        self.queue(400)
        self.check(syncconnect.ValidationException)

    @responses.activate
    def test_401(self):
        self.queue(401)
        self.check(syncconnect.AuthenticationException)

    @responses.activate
    def test_403(self):
        self.queue(403)
        self.check(syncconnect.PermissionException)

    @responses.activate
    def test_404(self):
        self.queue(404)
        self.check(syncconnect.ResourceNotFoundException)

    @responses.activate
    def test_429(self):
        self.queue(429)
        self.check(syncconnect.RateLimitingException)

    # NOTE(review): duplicate definition — this shadows the test_429 above,
    # so only one of the two ever runs.
    @responses.activate
    def test_429(self):
        self.queue(429)
        self.check(syncconnect.RateLimitingException)

    @responses.activate
    def test_500(self):
        self.queue(500)
        self.check(syncconnect.ServerException)

    @responses.activate
    def test_504(self):
        # Queue explicitly here because the body needs an 'error' field too.
        responses.add('GET', self.URL, status=504, json={
            'error': 'some error', 'message': self.EXPECTED})
        self.check(syncconnect.GatewayTimeoutException)

    @responses.activate
    def test_other(self):
        # Unmapped status codes fall back to the generic SyncException.
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.queue(503)
        with self.assertRaises(syncconnect.SyncException) as se:
            syncconnect.requester.call('GET', self.URL)
        self.assertEquals(se.exception.message, 'Unexpected error')
| true | true |
f71af9645244b8b41948a3f0545272ecb692549e | 2,122 | py | Python | lib/CollectionCheckerDIF.py | joser1945/cmr-metadata-review | df0bb24dd06f981af907569f1a97966753053a99 | [
"Apache-2.0"
] | 15 | 2018-06-26T19:58:44.000Z | 2022-03-01T21:19:34.000Z | lib/CollectionCheckerDIF.py | joser1945/cmr-metadata-review | df0bb24dd06f981af907569f1a97966753053a99 | [
"Apache-2.0"
] | 61 | 2018-06-27T15:15:41.000Z | 2022-03-08T15:39:32.000Z | lib/CollectionCheckerDIF.py | joser1945/cmr-metadata-review | df0bb24dd06f981af907569f1a97966753053a99 | [
"Apache-2.0"
] | 9 | 2019-01-22T15:48:48.000Z | 2021-10-01T18:38:30.000Z | '''
Copyright 2016, United States Government, as represented by the Administrator of
the National Aeronautics and Space Administration. All rights reserved.
The "pyCMR" platform is licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You may obtain a
copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License.
'''
import json
import sys
from CheckerDIF import checkerRules
from CSVDIF import DIFOutputCSV
from JsonDIF import DIFOutputJSON
class Checker():
    """Runs the DIF collection-metadata checker rules and renders the
    results either as CSV or as JSON."""

    def __init__(self):
        self.checkerRules = checkerRules()
        self.DIFOutputCSV = DIFOutputCSV(self.checkerRules, self.wrap)
        self.DIFOutputJSON = DIFOutputJSON(self.checkerRules, self.wrap)

    def getItemList(self, items, keys):
        """Recursively collect the values found at the key path `keys`
        inside `items` (a dict or a list of dicts).

        A missing key contributes `None`, so the result has one entry per
        leaf visited.
        """
        if not isinstance(items, list):
            items = [items]
        if not keys:
            return items
        results = []
        for item in items:
            # dict.has_key() was removed in Python 3; use `in` instead.
            if keys[0] in item:
                results += self.getItemList(item[keys[0]], keys[1:])
            else:
                results += [None]
        return results

    def wrap(self, items, func, child):
        """Apply `func` to every value found at the dotted path `child`
        within `items` and join the string results with ';'."""
        keys = child.split('.')
        results = [func(item) for item in self.getItemList(items, keys)]
        return ';'.join(results)

    def checkAll(self, metadata):
        """Run every rule against `metadata` and return CSV output."""
        return self.DIFOutputCSV.checkAll(metadata)

    def checkAllJSON(self, metadata):
        """Run every rule against `metadata` and return JSON output."""
        return self.DIFOutputJSON.checkAll(metadata)
# Script entry point: read the DIF metadata document named by the first CLI
# argument, run every JSON check, and print the results as a JSON document.
# NOTE(review): runs at import time — there is no `if __name__ == "__main__"`
# guard, so importing this module executes the script.
x = Checker()
with open(sys.argv[1], 'r') as f:
    contents = f.read()
resultFields = x.checkAllJSON(contents)
print(json.dumps(resultFields))
| 33.15625 | 88 | 0.661169 |
import json
import sys
from CheckerDIF import checkerRules
from CSVDIF import DIFOutputCSV
from JsonDIF import DIFOutputJSON
class Checker():
    """Runs the DIF collection-metadata checker rules and renders the
    results either as CSV or as JSON."""

    def __init__(self):
        self.checkerRules = checkerRules()
        self.DIFOutputCSV = DIFOutputCSV(self.checkerRules, self.wrap)
        self.DIFOutputJSON = DIFOutputJSON(self.checkerRules, self.wrap)

    def getItemList(self, items, keys):
        """Recursively collect the values found at the key path `keys`
        inside `items` (a dict or a list of dicts).

        A missing key contributes `None`, so the result has one entry per
        leaf visited.
        """
        if not isinstance(items, list):
            items = [items]
        if not keys:
            return items
        results = []
        for item in items:
            # dict.has_key() was removed in Python 3; use `in` instead.
            if keys[0] in item:
                results += self.getItemList(item[keys[0]], keys[1:])
            else:
                results += [None]
        return results

    def wrap(self, items, func, child):
        """Apply `func` to every value found at the dotted path `child`
        within `items` and join the string results with ';'."""
        keys = child.split('.')
        results = [func(item) for item in self.getItemList(items, keys)]
        return ';'.join(results)

    def checkAll(self, metadata):
        """Run every rule against `metadata` and return CSV output."""
        return self.DIFOutputCSV.checkAll(metadata)

    def checkAllJSON(self, metadata):
        """Run every rule against `metadata` and return JSON output."""
        return self.DIFOutputJSON.checkAll(metadata)
# Script entry point: read the DIF metadata document named by the first CLI
# argument, run every JSON check, and print the results as a JSON document.
# NOTE(review): runs at import time — there is no `if __name__ == "__main__"`
# guard, so importing this module executes the script.
x = Checker()
with open(sys.argv[1], 'r') as f:
    contents = f.read()
resultFields = x.checkAllJSON(contents)
print(json.dumps(resultFields))
| true | true |
f71afa37b95ab3440c489490d28114d5823b2630 | 409 | py | Python | job_extract/jobs/impl/indeed_cursor.py | evbarnett/job_extract | dafa4d69a5daca83d337192617b244c89e4b5ae6 | [
"MIT"
] | null | null | null | job_extract/jobs/impl/indeed_cursor.py | evbarnett/job_extract | dafa4d69a5daca83d337192617b244c89e4b5ae6 | [
"MIT"
] | null | null | null | job_extract/jobs/impl/indeed_cursor.py | evbarnett/job_extract | dafa4d69a5daca83d337192617b244c89e4b5ae6 | [
"MIT"
] | null | null | null | class IndeedCursor(JobCursor):
def __init__(self, title: str, location: str, radius: int = 25):
base_url = "https://www.indeed.com/jobs?"
self._title = title
self._location = location
title_esc = ul.quote(self._title, safe='')
location_esc = ul.quote(self._location, safe='')
req_url = base_url + "q={}&l={}".format(title_esc, location_esc)
# TODO
| 37.181818 | 72 | 0.611247 | class IndeedCursor(JobCursor):
def __init__(self, title: str, location: str, radius: int = 25):
base_url = "https://www.indeed.com/jobs?"
self._title = title
self._location = location
title_esc = ul.quote(self._title, safe='')
location_esc = ul.quote(self._location, safe='')
req_url = base_url + "q={}&l={}".format(title_esc, location_esc)
| true | true |
f71afacc656469a5f59aa99865a3ea05cae6a31d | 6,053 | py | Python | Test/Machine/rbm.py | tvieijra/netket | ef3ff32b242f25b6a6ae0f08db1aada85775a2ea | [
"Apache-2.0"
] | 10 | 2019-11-29T02:51:53.000Z | 2021-08-14T18:52:33.000Z | Test/Machine/rbm.py | tvieijra/netket | ef3ff32b242f25b6a6ae0f08db1aada85775a2ea | [
"Apache-2.0"
] | 2 | 2020-03-03T11:12:00.000Z | 2020-05-01T17:04:41.000Z | Test/Machine/rbm.py | tvieijra/netket | ef3ff32b242f25b6a6ae0f08db1aada85775a2ea | [
"Apache-2.0"
] | 6 | 2019-12-02T07:29:01.000Z | 2021-04-04T21:55:21.000Z | # Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netket
import numpy as _np
__all__ = ["PyRbm"]
class PyRbm(netket.machine.CxxMachine):
    """
    __Do not use me in production code!__

    A proof of concept implementation of a complex-valued RBM in pure Python.
    This is an example of how to subclass `CxxMachine` so that the machine will
    be usable with NetKet's C++ core.

    This class can be used as a drop-in replacement for `RbmSpin`.
    """

    def __init__(
        self, hilbert, alpha=None, use_visible_bias=True, use_hidden_bias=True
    ):
        r"""Constructs a new RBM.

        Args:
            hilbert: Hilbert space.
            alpha: `alpha * hilbert.size` is the number of hidden spins.
                Must be a non-negative number.
            use_visible_bias: specifies whether to use a bias for visible
                spins.
            use_hidden_bias: specifies whether to use a bias for hidden spins.

        Raises:
            ValueError: if `alpha` is omitted or negative.
        """
        # NOTE: The following call to __init__ is important!
        super(PyRbm, self).__init__(hilbert)
        n = hilbert.size
        # `alpha=None` previously fell through to `alpha < 0` and died with
        # an unhelpful TypeError on Python 3; raise a clear error instead.
        if alpha is None:
            raise ValueError("`alpha` must be specified")
        if alpha < 0:
            raise ValueError("`alpha` should be non-negative")
        m = int(round(alpha * n))
        self._w = _np.empty([m, n], dtype=_np.complex128)
        self._a = _np.empty(n, dtype=_np.complex128) if use_visible_bias else None
        self._b = _np.empty(m, dtype=_np.complex128) if use_hidden_bias else None

    def _number_parameters(self):
        r"""Returns the number of parameters in the machine. We just sum the
        sizes of all the tensors we hold.
        """
        return (
            self._w.size
            + (self._a.size if self._a is not None else 0)
            + (self._b.size if self._b is not None else 0)
        )

    def _number_visible(self):
        r"""Returns the number of visible units.
        """
        return self._w.shape[1]

    def _get_parameters(self):
        r"""Returns the parameters as a 1D tensor.

        This function tries to order parameters in the exact same way as
        ``RbmSpin`` does so that we can do stuff like

        >>> import netket
        >>> import numpy
        >>> hilbert = netket.hilbert.Spin(
                graph=netket.graph.Hypercube(length=100, n_dim=1),
                s=1/2.
            )
        >>> cxx_rbm = netket.machine.RbmSpin(hilbert, alpha=3)
        >>> py_rbm = netket.machine.PyRbm(hilbert, alpha=3)
        >>> cxx_rbm.init_random_parameters()
        >>> # Order of parameters is the same, so we can assign one to the
        >>> # other
        >>> py_rbm.parameters = cxx_rbm.parameters
        >>> x = numpy.random.choice(hilbert.local_states, size=hilbert.size)
        >>> assert numpy.isclose(py_rbm.log_val(x), cxx_rbm.log_val(x))
        """
        # Order: visible bias, hidden bias, then the flattened weights.
        params = tuple()
        if self._a is not None:
            params += (self._a,)
        if self._b is not None:
            params += (self._b,)
        params += (self._w.reshape(-1, order="C"),)
        return _np.concatenate(params)

    def _set_parameters(self, p):
        r"""Sets parameters from a 1D tensor.

        ``self._set_parameters(self._get_parameters())`` is an identity.
        """
        i = 0
        if self._a is not None:
            self._a[:] = p[i : i + self._a.size]
            i += self._a.size
        if self._b is not None:
            self._b[:] = p[i : i + self._b.size]
            i += self._b.size
        self._w[:] = p[i : i + self._w.size].reshape(self._w.shape, order="C")

    def log_val(self, x):
        r"""Computes the logarithm of the wave function given a spin
        configuration ``x``.
        """
        r = _np.dot(self._w, x)
        if self._b is not None:
            r += self._b
        r = _np.sum(PyRbm._log_cosh(r))
        if self._a is not None:
            r += _np.dot(self._a, x)
        # Officially, we should return
        #     self._w.shape[0] * 0.6931471805599453 + r
        # but the C++ implementation ignores the "constant factor"
        return r

    def der_log(self, x):
        r"""Computes the gradient of the logarithm of the wave function
        given a spin configuration ``x``.
        """
        grad = _np.empty(self.n_par, dtype=_np.complex128)
        i = 0
        # d log psi / d a_j = x_j
        if self._a is not None:
            grad[i : i + self._a.size] = x
            i += self._a.size
        tanh_stuff = _np.dot(self._w, x)
        if self._b is not None:
            tanh_stuff += self._b
        tanh_stuff = _np.tanh(tanh_stuff, out=tanh_stuff)
        # d log psi / d b_k = tanh(w_k . x + b_k)
        if self._b is not None:
            grad[i : i + self._b.size] = tanh_stuff
            i += self._b.size
        # d log psi / d w_kj = tanh(w_k . x + b_k) * x_j, written in place.
        out = grad[i : i + self._w.size]
        out.shape = (tanh_stuff.size, x.size)
        _np.outer(tanh_stuff, x, out=out)
        return grad

    def _is_holomorphic(self):
        r"""Complex valued RBM a holomorphic function.
        """
        return True

    def save(self, filename):
        r"""Saves machine weights to ``filename`` using ``pickle``.
        """
        import pickle

        with open(filename, "wb") as output_file:
            pickle.dump((self._w, self._a, self._b), output_file)

    def load(self, filename):
        r"""Loads machine weights from ``filename`` using ``pickle``.
        """
        import pickle

        with open(filename, "rb") as input_file:
            self._w, self._a, self._b = pickle.load(input_file)

    @staticmethod
    def _log_cosh(x):
        # TODO: Handle big numbers properly — _np.cosh overflows once
        # |Re(x)| exceeds roughly 710; a numerically stable form such as
        # |x| + log1p(exp(-2|x|)) - log(2) would be needed there.
        return _np.log(_np.cosh(x))
| 33.815642 | 82 | 0.587973 |
import netket
import numpy as _np
__all__ = ["PyRbm"]
class PyRbm(netket.machine.CxxMachine):
    """Proof-of-concept complex-valued RBM implemented in pure Python.

    Demonstrates how to subclass `CxxMachine` so the machine is usable with
    NetKet's C++ core; intended as a drop-in replacement for `RbmSpin`.
    Not meant for production use.
    """

    def __init__(
        self, hilbert, alpha=None, use_visible_bias=True, use_hidden_bias=True
    ):
        """Construct a new RBM over `hilbert` with `alpha * hilbert.size`
        hidden spins and optional visible/hidden biases.
        """
        super(PyRbm, self).__init__(hilbert)
        n = hilbert.size
        # NOTE(review): with the default alpha=None this comparison raises
        # an unhelpful TypeError on Python 3 — confirm callers always pass
        # an explicit alpha.
        if alpha < 0:
            raise ValueError("`alpha` should be non-negative")
        m = int(round(alpha * n))
        # _w: hidden x visible weight matrix; _a/_b: visible/hidden biases
        # (None when the corresponding bias is disabled).
        self._w = _np.empty([m, n], dtype=_np.complex128)
        self._a = _np.empty(n, dtype=_np.complex128) if use_visible_bias else None
        self._b = _np.empty(m, dtype=_np.complex128) if use_hidden_bias else None

    def _number_parameters(self):
        """Total parameter count: sizes of all held tensors summed."""
        return (
            self._w.size
            + (self._a.size if self._a is not None else 0)
            + (self._b.size if self._b is not None else 0)
        )

    def _number_visible(self):
        """Number of visible units (columns of the weight matrix)."""
        return self._w.shape[1]

    def _get_parameters(self):
        """Return all parameters as one 1D tensor.

        Order — visible bias, hidden bias, flattened weights — matches
        ``RbmSpin`` so parameters can be copied between the two machines.
        """
        params = tuple()
        if self._a is not None:
            params += (self._a,)
        if self._b is not None:
            params += (self._b,)
        params += (self._w.reshape(-1, order="C"),)
        return _np.concatenate(params)

    def _set_parameters(self, p):
        """Set parameters from a 1D tensor; inverse of `_get_parameters`."""
        i = 0
        if self._a is not None:
            self._a[:] = p[i : i + self._a.size]
            i += self._a.size
        if self._b is not None:
            self._b[:] = p[i : i + self._b.size]
            i += self._b.size
        self._w[:] = p[i : i + self._w.size].reshape(self._w.shape, order="C")

    def log_val(self, x):
        """Logarithm of the wave function for spin configuration ``x``.

        Like the C++ implementation, omits the constant m*log(2) term.
        """
        r = _np.dot(self._w, x)
        if self._b is not None:
            r += self._b
        r = _np.sum(PyRbm._log_cosh(r))
        if self._a is not None:
            r += _np.dot(self._a, x)
        return r

    def der_log(self, x):
        """Gradient of log psi w.r.t. all parameters, in the same order as
        `_get_parameters`."""
        grad = _np.empty(self.n_par, dtype=_np.complex128)
        i = 0
        # d log psi / d a_j = x_j
        if self._a is not None:
            grad[i : i + self._a.size] = x
            i += self._a.size
        tanh_stuff = _np.dot(self._w, x)
        if self._b is not None:
            tanh_stuff += self._b
        tanh_stuff = _np.tanh(tanh_stuff, out=tanh_stuff)
        # d log psi / d b_k = tanh(w_k . x + b_k)
        if self._b is not None:
            grad[i : i + self._b.size] = tanh_stuff
            i += self._b.size
        # d log psi / d w_kj = tanh(w_k . x + b_k) * x_j, written in place.
        out = grad[i : i + self._w.size]
        out.shape = (tanh_stuff.size, x.size)
        _np.outer(tanh_stuff, x, out=out)
        return grad

    def _is_holomorphic(self):
        """A complex-valued RBM is a holomorphic function."""
        return True

    def save(self, filename):
        """Save machine weights to ``filename`` using ``pickle``."""
        import pickle
        with open(filename, "wb") as output_file:
            pickle.dump((self._w, self._a, self._b), output_file)

    def load(self, filename):
        """Load machine weights from ``filename`` using ``pickle``."""
        import pickle
        with open(filename, "rb") as input_file:
            self._w, self._a, self._b = pickle.load(input_file)

    @staticmethod
    def _log_cosh(x):
        # NOTE(review): _np.cosh overflows for |Re(x)| >~ 710; a stable
        # log-cosh would be needed for large arguments.
        return _np.log(_np.cosh(x))
| true | true |
f71afb68427e72653aff0696997abed27acca654 | 2,058 | py | Python | aioradio/jira.py | nrccua/aioradio | 2437c2a7fcad81c2e410002c685f587df2fcd76c | [
"MIT"
] | 9 | 2021-01-04T13:13:03.000Z | 2021-04-30T18:41:08.000Z | aioradio/jira.py | nrccua/aioradio | 2437c2a7fcad81c2e410002c685f587df2fcd76c | [
"MIT"
] | null | null | null | aioradio/jira.py | nrccua/aioradio | 2437c2a7fcad81c2e410002c685f587df2fcd76c | [
"MIT"
] | null | null | null | """Generic functions related to Jira."""
from typing import Any, Dict
import httpx
async def post_jira_issue(url: str, jira_user: str, jira_token: str, payload: Dict[str, Any]) -> "httpx.Response":
    """Post payload to create jira issue.

    Authenticates with HTTP basic auth using the jira username and token.

    Args:
        url (str): Jira REST endpoint to POST to
        jira_user (str): jira username
        jira_token (str): jira token
        payload (Dict[str, Any]): jira payload describing ticket info

    Returns:
        httpx.Response: raw HTTP response of the operation
    """
    # The previous `-> Dict[str, Any]` annotation was wrong: httpx returns
    # a Response object; callers must call `.json()` themselves.
    headers = {'Content-Type': 'application/json'}
    auth = (jira_user, jira_token)
    async with httpx.AsyncClient() as client:
        return await client.post(url=url, json=payload, auth=auth, headers=headers)
async def get_jira_issue(url: str, jira_user: str, jira_token: str) -> "httpx.Response":
    """Get Jira issue using jira_link built with the expected jira_id, an
    example: https://nrccua.atlassian.net/rest/api/2/issue/<jira_id>.

    Args:
        url (str): Jira REST endpoint to GET
        jira_user (str): jira username
        jira_token (str): jira token

    Returns:
        httpx.Response: raw HTTP response of the operation
    """
    # The previous `-> Dict[str, Any]` annotation was wrong: httpx returns
    # a Response object; callers must call `.json()` themselves.
    headers = {'Content-Type': 'application/json'}
    auth = (jira_user, jira_token)
    async with httpx.AsyncClient() as client:
        return await client.get(url=url, auth=auth, headers=headers)
async def add_comment_to_jira(url: str, jira_user: str, jira_token: str, comment: str) -> "httpx.Response":
    """Add Jira comment to an existing issue.

    Args:
        url (str): comment endpoint, e.g.
            https://nrccua.atlassian.net/rest/api/2/issue/<jira_id>/comment
        jira_user (str): jira username
        jira_token (str): jira token
        comment (str): comment to add to jira ticket

    Raises:
        ValueError: if ``url`` is not a comment endpoint

    Returns:
        httpx.Response: raw HTTP response of the operation
    """
    # Guard against posting the comment payload to a non-comment endpoint.
    if not url.endswith('comment'):
        msg = 'Check url value! Good example is https://nrccua.atlassian.net/rest/api/2/issue/<jira_id>/comment'
        raise ValueError(msg)
    return await post_jira_issue(
        url=url, payload={'body': comment}, jira_user=jira_user, jira_token=jira_token)
| 30.264706 | 112 | 0.651118 |
from typing import Any, Dict
import httpx
async def post_jira_issue(url: str, jira_user: str, jira_token: str, payload: Dict[str, Any]) -> Dict[str, Any]:
    """Create a Jira issue by POSTing ``payload`` to ``url``.

    Authenticates with HTTP basic auth (``jira_user``/``jira_token``).

    NOTE(review): despite the annotation, this returns the raw
    ``httpx.Response`` object, not a dict — confirm and fix the annotation.
    """
    headers = {'Content-Type': 'application/json'}
    auth = (jira_user, jira_token)
    async with httpx.AsyncClient() as client:
        return await client.post(url=url, json=payload, auth=auth, headers=headers)
async def get_jira_issue(url: str, jira_user: str, jira_token: str) -> Dict[str, Any]:
    """Fetch a Jira issue from ``url`` (a REST issue endpoint such as
    https://nrccua.atlassian.net/rest/api/2/issue/<jira_id>).

    NOTE(review): despite the annotation, this returns the raw
    ``httpx.Response`` object, not a dict — confirm and fix the annotation.
    """
    headers = {'Content-Type': 'application/json'}
    auth = (jira_user, jira_token)
    async with httpx.AsyncClient() as client:
        return await client.get(url=url, auth=auth, headers=headers)
async def add_comment_to_jira(url: str, jira_user: str, jira_token: str, comment: str) -> Dict[str, Any]:
    """Add ``comment`` to an existing Jira issue.

    Raises ValueError unless ``url`` ends with 'comment' (i.e. it is a
    .../issue/<jira_id>/comment endpoint).
    """
    if not url.endswith('comment'):
        msg = 'Check url value! Good example is https://nrccua.atlassian.net/rest/api/2/issue/<jira_id>/comment'
        raise ValueError(msg)
    return await post_jira_issue(
        url=url, payload={'body': comment}, jira_user=jira_user, jira_token=jira_token)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.