text
stringlengths 2
999k
|
|---|
# Application: organize a folder's contents.
# Given a folder (e.g. "new folder1") containing audio, video and
# document files, move each file into a per-category sub-folder.
import os, shutil
# NOTE --> you can write every single extension inside tuples(becuz we don't want to change the values)
dict_extensions = {
'audio_extensions' : ('.mp3','.m4a','.wav','.flac'),
'video_extensions' : ('.mp4','.mkv','.MKV','.flv','.mpeg','.MOV'),
'documents_extensions' : ('.docx','.pdf','.txt'),
}
folderpath = input('enter folder path : ')
def file_finder(folder_path,file_extensions):
files = []
for file in os.listdir(folder_path):
for extension in file_extensions:
if file.endswith(extension):
files.append(file)
return files
#print(file_finder(folderpath,video_extensions))
for extension_type,extension_tuple in dict_extensions.items():
#print('calling file finder')
#print(file_finder(folderpath,extension_tuple))
folder_name = extension_type.split('_')[0] + 'Files'
folder_path = os.path.join(folderpath,folder_name)
os.mkdir(folder_path)
for item in file_finder(folderpath,extension_tuple):
item_path = os.path.join(folderpath,item)
item_new_path = os.path.join(folder_path,item)
shutil.move(item_path,item_new_path)
|
from .encoding import Encoding
from .wrappers import Upsample, resize
__all__ = [
'Upsample',
'resize',
'Encoding',
]
|
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""
Nominal tests of the histogramnd function.
"""
import unittest
import pytest
import numpy as np
from silx.math.chistogramnd import chistogramnd as histogramnd
from silx.math import Histogramnd
def _get_bin_edges(histo_range, n_bins, n_dims):
edges = []
for i_dim in range(n_dims):
edges.append(histo_range[i_dim, 0] +
np.arange(n_bins[i_dim] + 1) *
(histo_range[i_dim, 1] - histo_range[i_dim, 0]) /
n_bins[i_dim])
return tuple(edges)
# ==============================================================
# ==============================================================
# ==============================================================
class _Test_chistogramnd_nominal(unittest.TestCase):
"""
Unit tests of the histogramnd function.
"""
__test__ = False # ignore abstract classe
ndims = None
def setUp(self):
if type(self).__name__.startswith("_"):
self.skipTest("Abstract class")
ndims = self.ndims
self.tested_dim = ndims-1
if ndims is None:
raise ValueError('ndims class member not set.')
sample = np.array([5.5, -3.3,
0., -0.5,
3.3, 8.8,
-7.7, 6.0,
-4.0])
weights = np.array([500.5, -300.3,
0.01, -0.5,
300.3, 800.8,
-700.7, 600.6,
-400.4])
n_elems = len(sample)
if ndims == 1:
shape = (n_elems,)
else:
shape = (n_elems, ndims)
self.sample = np.zeros(shape=shape, dtype=sample.dtype)
if ndims == 1:
self.sample = sample
else:
self.sample[..., ndims-1] = sample
self.weights = weights
# the tests are performed along one dimension,
# all the other bins indices along the other dimensions
# are expected to be 2
# (e.g : when testing a 2D sample : [0, x] will go into
# bin [2, y] because of the bin ranges [-2, 2] and n_bins = 4
# for the first dimension)
self.other_axes_index = 2
self.histo_range = np.repeat([[-2., 2.]], ndims, axis=0)
self.histo_range[ndims-1] = [-4., 6.]
self.n_bins = np.array([4]*ndims)
self.n_bins[ndims-1] = 5
if ndims == 1:
def fill_histo(h, v, dim, op=None):
if op:
h[:] = op(h[:], v)
else:
h[:] = v
self.fill_histo = fill_histo
else:
def fill_histo(h, v, dim, op=None):
idx = [self.other_axes_index]*len(h.shape)
idx[dim] = slice(0, None)
idx = tuple(idx)
if op:
h[idx] = op(h[idx], v)
else:
h[idx] = v
self.fill_histo = fill_histo
def test_nominal(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul, bin_edges = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights)
expected_edges = _get_bin_edges(self.histo_range,
self.n_bins,
self.ndims)
self.assertEqual(cumul.dtype, np.float64)
self.assertEqual(histo.dtype, np.uint32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
for i_edges, edges in enumerate(expected_edges):
self.assertTrue(np.array_equal(bin_edges[i_edges],
expected_edges[i_edges]),
msg='Testing bin_edges for dim {0}'
''.format(i_edges+1))
def test_nominal_wh_dtype(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.float32)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul, bin_edges = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights,
wh_dtype=np.float32)
self.assertEqual(cumul.dtype, np.float32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.allclose(cumul, expected_c))
def test_nominal_uncontiguous_sample(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
shape = list(self.sample.shape)
shape[0] *= 2
sample = np.zeros(shape, dtype=self.sample.dtype)
uncontig_sample = sample[::2, ...]
uncontig_sample[:] = self.sample
self.assertFalse(uncontig_sample.flags['C_CONTIGUOUS'],
msg='Making sure the array is not contiguous.')
histo, cumul, bin_edges = histogramnd(uncontig_sample,
self.histo_range,
self.n_bins,
weights=self.weights)
self.assertEqual(cumul.dtype, np.float64)
self.assertEqual(histo.dtype, np.uint32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
def test_nominal_uncontiguous_weights(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
shape = list(self.weights.shape)
shape[0] *= 2
weights = np.zeros(shape, dtype=self.weights.dtype)
uncontig_weights = weights[::2, ...]
uncontig_weights[:] = self.weights
self.assertFalse(uncontig_weights.flags['C_CONTIGUOUS'],
msg='Making sure the array is not contiguous.')
histo, cumul, bin_edges = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=uncontig_weights)
self.assertEqual(cumul.dtype, np.float64)
self.assertEqual(histo.dtype, np.uint32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
def test_nominal_wo_weights(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
histo, cumul = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=None)[0:2]
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(cumul is None)
def test_nominal_wo_weights_w_cumul(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
# creating an array of ones just to make sure that
# it is not cleared by histogramnd
cumul_in = np.ones(self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
histo, cumul = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=None,
weighted_histo=cumul_in)[0:2]
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(cumul is None)
self.assertTrue(np.array_equal(cumul_in,
np.ones(shape=self.n_bins,
dtype=np.double)))
def test_nominal_wo_weights_w_histo(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
# creating an array of ones just to make sure that
# it is not cleared by histogramnd
histo_in = np.ones(self.n_bins, dtype=np.uint32)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
histo, cumul = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=None,
histo=histo_in)[0:2]
self.assertTrue(np.array_equal(histo, expected_h + 1))
self.assertTrue(cumul is None)
self.assertEqual(id(histo), id(histo_in))
def test_nominal_last_bin_closed(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 2])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 1101.1])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights,
last_bin_closed=True)[0:2]
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
def test_int32_weights_double_weights_range(self):
"""
"""
weight_min = -299.9 # ===> will be cast to -299
weight_max = 499.9 # ===> will be cast to 499
expected_h_tpl = np.array([0, 1, 1, 1, 0])
expected_c_tpl = np.array([0., 0., 0., 300., 0.])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights.astype(np.int32),
weight_min=weight_min,
weight_max=weight_max)[0:2]
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
def test_reuse_histo(self):
"""
"""
expected_h_tpl = np.array([2, 3, 2, 2, 2])
expected_c_tpl = np.array([0.0, -7007, -5.0, 0.1, 3003.0])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights)[0:2]
sample_2 = self.sample[:]
if len(sample_2.shape) == 1:
idx = (slice(0, None),)
else:
idx = slice(0, None), self.tested_dim
sample_2[idx] += 2
histo_2, cumul = histogramnd(sample_2, # <==== !!
self.histo_range,
self.n_bins,
weights=10 * self.weights, # <==== !!
histo=histo)[0:2]
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
self.assertEqual(id(histo), id(histo_2))
def test_reuse_cumul(self):
"""
"""
expected_h_tpl = np.array([0, 2, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -7007.5, -4.99, 300.4, 3503.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights)[0:2]
sample_2 = self.sample[:]
if len(sample_2.shape) == 1:
idx = (slice(0, None),)
else:
idx = slice(0, None), self.tested_dim
sample_2[idx] += 2
histo, cumul_2 = histogramnd(sample_2, # <==== !!
self.histo_range,
self.n_bins,
weights=10 * self.weights, # <==== !!
weighted_histo=cumul)[0:2]
self.assertEqual(cumul.dtype, np.float64)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.allclose(cumul, expected_c, rtol=10e-15))
self.assertEqual(id(cumul), id(cumul_2))
def test_reuse_cumul_float(self):
"""
"""
expected_h_tpl = np.array([0, 2, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -7007.5, -4.99, 300.4, 3503.5],
dtype=np.float32)
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul = histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights)[0:2]
# converting the cumul array to float
cumul = cumul.astype(np.float32)
sample_2 = self.sample[:]
if len(sample_2.shape) == 1:
idx = (slice(0, None),)
else:
idx = slice(0, None), self.tested_dim
sample_2[idx] += 2
histo, cumul_2 = histogramnd(sample_2, # <==== !!
self.histo_range,
self.n_bins,
weights=10 * self.weights, # <==== !!
weighted_histo=cumul)[0:2]
self.assertEqual(cumul.dtype, np.float32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertEqual(id(cumul), id(cumul_2))
self.assertTrue(np.allclose(cumul, expected_c, rtol=10e-15))
class _Test_Histogramnd_nominal(unittest.TestCase):
"""
Unit tests of the Histogramnd class.
"""
__test__ = False # ignore abstract class
ndims = None
def setUp(self):
ndims = self.ndims
if ndims is None:
self.skipTest("Abstract class")
self.tested_dim = ndims-1
if ndims is None:
raise ValueError('ndims class member not set.')
sample = np.array([5.5, -3.3,
0., -0.5,
3.3, 8.8,
-7.7, 6.0,
-4.0])
weights = np.array([500.5, -300.3,
0.01, -0.5,
300.3, 800.8,
-700.7, 600.6,
-400.4])
n_elems = len(sample)
if ndims == 1:
shape = (n_elems,)
else:
shape = (n_elems, ndims)
self.sample = np.zeros(shape=shape, dtype=sample.dtype)
if ndims == 1:
self.sample = sample
else:
self.sample[..., ndims-1] = sample
self.weights = weights
# the tests are performed along one dimension,
# all the other bins indices along the other dimensions
# are expected to be 2
# (e.g : when testing a 2D sample : [0, x] will go into
# bin [2, y] because of the bin ranges [-2, 2] and n_bins = 4
# for the first dimension)
self.other_axes_index = 2
self.histo_range = np.repeat([[-2., 2.]], ndims, axis=0)
self.histo_range[ndims-1] = [-4., 6.]
self.n_bins = np.array([4]*ndims)
self.n_bins[ndims-1] = 5
if ndims == 1:
def fill_histo(h, v, dim, op=None):
if op:
h[:] = op(h[:], v)
else:
h[:] = v
self.fill_histo = fill_histo
else:
def fill_histo(h, v, dim, op=None):
idx = [self.other_axes_index]*len(h.shape)
idx[dim] = slice(0, None)
idx = tuple(idx)
if op:
h[idx] = op(h[idx], v)
else:
h[idx] = v
self.fill_histo = fill_histo
def test_nominal(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo = Histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights)
histo, cumul, bin_edges = histo
expected_edges = _get_bin_edges(self.histo_range,
self.n_bins,
self.ndims)
self.assertEqual(cumul.dtype, np.float64)
self.assertEqual(histo.dtype, np.uint32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
for i_edges, edges in enumerate(expected_edges):
self.assertTrue(np.array_equal(bin_edges[i_edges],
expected_edges[i_edges]),
msg='Testing bin_edges for dim {0}'
''.format(i_edges+1))
def test_nominal_wh_dtype(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.float32)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul, bin_edges = Histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights,
wh_dtype=np.float32)
self.assertEqual(cumul.dtype, np.float32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.allclose(cumul, expected_c))
def test_nominal_uncontiguous_sample(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
shape = list(self.sample.shape)
shape[0] *= 2
sample = np.zeros(shape, dtype=self.sample.dtype)
uncontig_sample = sample[::2, ...]
uncontig_sample[:] = self.sample
self.assertFalse(uncontig_sample.flags['C_CONTIGUOUS'],
msg='Making sure the array is not contiguous.')
histo, cumul, bin_edges = Histogramnd(uncontig_sample,
self.histo_range,
self.n_bins,
weights=self.weights)
self.assertEqual(cumul.dtype, np.float64)
self.assertEqual(histo.dtype, np.uint32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
def test_nominal_uncontiguous_weights(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
shape = list(self.weights.shape)
shape[0] *= 2
weights = np.zeros(shape, dtype=self.weights.dtype)
uncontig_weights = weights[::2, ...]
uncontig_weights[:] = self.weights
self.assertFalse(uncontig_weights.flags['C_CONTIGUOUS'],
msg='Making sure the array is not contiguous.')
histo, cumul, bin_edges = Histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=uncontig_weights)
self.assertEqual(cumul.dtype, np.float64)
self.assertEqual(histo.dtype, np.uint32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
def test_nominal_wo_weights(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
histo, cumul = Histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=None)[0:2]
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(cumul is None)
def test_nominal_last_bin_closed(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 2])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 1101.1])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul = Histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights,
last_bin_closed=True)[0:2]
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
def test_int32_weights_double_weights_range(self):
"""
"""
weight_min = -299.9 # ===> will be cast to -299
weight_max = 499.9 # ===> will be cast to 499
expected_h_tpl = np.array([0, 1, 1, 1, 0])
expected_c_tpl = np.array([0., 0., 0., 300., 0.])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo, cumul = Histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights.astype(np.int32),
weight_min=weight_min,
weight_max=weight_max)[0:2]
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
def test_nominal_no_sample(self):
"""
"""
histo_inst = Histogramnd(None,
self.histo_range,
self.n_bins)
histo, weighted_histo, edges = histo_inst
self.assertIsNone(histo)
self.assertIsNone(weighted_histo)
self.assertIsNone(edges)
self.assertIsNone(histo_inst.histo)
self.assertIsNone(histo_inst.weighted_histo)
self.assertIsNone(histo_inst.edges)
def test_empty_init_accumulate(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo_inst = Histogramnd(None,
self.histo_range,
self.n_bins)
histo_inst.accumulate(self.sample,
weights=self.weights)
histo = histo_inst.histo
cumul = histo_inst.weighted_histo
bin_edges = histo_inst.edges
expected_edges = _get_bin_edges(self.histo_range,
self.n_bins,
self.ndims)
self.assertEqual(cumul.dtype, np.float64)
self.assertEqual(histo.dtype, np.uint32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
for i_edges, edges in enumerate(expected_edges):
self.assertTrue(np.array_equal(bin_edges[i_edges],
expected_edges[i_edges]),
msg='Testing bin_edges for dim {0}'
''.format(i_edges+1))
def test_accumulate(self):
"""
"""
expected_h_tpl = np.array([2, 3, 2, 2, 2])
expected_c_tpl = np.array([-700.7, -7007.5, -4.99, 300.4, 3503.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo_inst = Histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights)
sample_2 = self.sample[:]
if len(sample_2.shape) == 1:
idx = (slice(0, None),)
else:
idx = slice(0, None), self.tested_dim
sample_2[idx] += 2
histo_inst.accumulate(sample_2, # <==== !!
weights=10 * self.weights) # <==== !!
histo = histo_inst.histo
cumul = histo_inst.weighted_histo
bin_edges = histo_inst.edges
self.assertEqual(cumul.dtype, np.float64)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.allclose(cumul, expected_c, rtol=10e-15))
def test_accumulate_no_weights(self):
"""
"""
expected_h_tpl = np.array([2, 3, 2, 2, 2])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo_inst = Histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=self.weights)
sample_2 = self.sample[:]
if len(sample_2.shape) == 1:
idx = (slice(0, None),)
else:
idx = slice(0, None), self.tested_dim
sample_2[idx] += 2
histo_inst.accumulate(sample_2) # <==== !!
histo = histo_inst.histo
cumul = histo_inst.weighted_histo
bin_edges = histo_inst.edges
self.assertEqual(cumul.dtype, np.float64)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.allclose(cumul, expected_c, rtol=10e-15))
def test_accumulate_no_weights_at_init(self):
"""
"""
expected_h_tpl = np.array([2, 3, 2, 2, 2])
expected_c_tpl = np.array([0.0, -700.7, -0.5, 0.01, 300.3])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
histo_inst = Histogramnd(self.sample,
self.histo_range,
self.n_bins,
weights=None) # <==== !!
cumul = histo_inst.weighted_histo
self.assertIsNone(cumul)
sample_2 = self.sample[:]
if len(sample_2.shape) == 1:
idx = (slice(0, None),)
else:
idx = slice(0, None), self.tested_dim
sample_2[idx] += 2
histo_inst.accumulate(sample_2,
weights=self.weights) # <==== !!
histo = histo_inst.histo
cumul = histo_inst.weighted_histo
bin_edges = histo_inst.edges
self.assertEqual(cumul.dtype, np.float64)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(cumul, expected_c))
def testNoneNativeTypes(self):
type = self.sample.dtype.newbyteorder("B")
sampleB = self.sample.astype(type)
type = self.sample.dtype.newbyteorder("L")
sampleL = self.sample.astype(type)
histo_inst = Histogramnd(sampleB,
self.histo_range,
self.n_bins,
weights=self.weights)
histo_inst = Histogramnd(sampleL,
self.histo_range,
self.n_bins,
weights=self.weights)
class Test_chistogram_nominal_1d(_Test_chistogramnd_nominal):
__test__ = True # because _Test_chistogramnd_nominal is ignored
ndims = 1
class Test_chistogram_nominal_2d(_Test_chistogramnd_nominal):
__test__ = True # because _Test_chistogramnd_nominal is ignored
ndims = 2
class Test_chistogram_nominal_3d(_Test_chistogramnd_nominal):
__test__ = True # because _Test_chistogramnd_nominal is ignored
ndims = 3
class Test_Histogramnd_nominal_1d(_Test_Histogramnd_nominal):
__test__ = True # because _Test_chistogramnd_nominal is ignored
ndims = 1
class Test_Histogramnd_nominal_2d(_Test_Histogramnd_nominal):
__test__ = True # because _Test_chistogramnd_nominal is ignored
ndims = 2
class Test_Histogramnd_nominal_3d(_Test_Histogramnd_nominal):
__test__ = True # because _Test_chistogramnd_nominal is ignored
ndims = 3
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-01-11 20:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crm', '0017_auto_20180111_2022'),
]
operations = [
migrations.AlterModelOptions(
name='textparagraphinsalesdocument',
options={'verbose_name': 'Text Paragraph In Sales Document', 'verbose_name_plural': 'Text Paragraphs In Sales Documents'},
),
migrations.RenameField(
model_name='deliverynote',
old_name='salescontract_ptr',
new_name='salesdocument_ptr',
),
migrations.RenameField(
model_name='emailaddressforsalesdocument',
old_name='contract',
new_name='sales_document',
),
migrations.RenameField(
model_name='invoice',
old_name='salescontract_ptr',
new_name='salesdocument_ptr',
),
migrations.RenameField(
model_name='paymentreminder',
old_name='salescontract_ptr',
new_name='salesdocument_ptr',
),
migrations.RenameField(
model_name='phoneaddressforsalesdocument',
old_name='contract',
new_name='sales_document',
),
migrations.RenameField(
model_name='postaladdressforsalesdocument',
old_name='contract',
new_name='sales_document',
),
migrations.RenameField(
model_name='purchaseconfirmation',
old_name='salescontract_ptr',
new_name='salesdocument_ptr',
),
migrations.RenameField(
model_name='quote',
old_name='salescontract_ptr',
new_name='salesdocument_ptr',
),
migrations.RenameField(
model_name='salesdocumentposition',
old_name='contract',
new_name='sales_document',
),
migrations.RenameField(
model_name='textparagraphinsalesdocument',
old_name='sales_contract',
new_name='sales_document',
),
]
|
# Copyright (C) 2010-2018 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import sys
import unittest
if sys.version_info > (3, 0):
from io import StringIO
else:
from StringIO import StringIO
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from webkit import messages
from webkit import parser
# Directory containing this script; test inputs and expected-result files
# live alongside it.
script_directory = os.path.dirname(os.path.realpath(__file__))
# When True (set via -r/--reset-results on the command line), the expectation
# files are rewritten instead of compared against.
reset_results = False

# Read the three message-description inputs once at import time; each test
# re-parses the raw text so individual tests stay independent.
with open(os.path.join(script_directory, 'test.messages.in')) as in_file:
    _messages_file_contents = in_file.read()

with open(os.path.join(script_directory, 'test-legacy.messages.in')) as in_file:
    _legacy_messages_file_contents = in_file.read()

with open(os.path.join(script_directory, 'test-superclass.messages.in')) as in_file:
    _superclass_messages_file_contents = in_file.read()

# Paths (relative to script_directory) of the checked-in expected outputs.
_expected_receiver_header_file_name = '../testMessages.h'
_expected_legacy_receiver_header_file_name = '../test-legacyMessages.h'
_expected_superclass_receiver_header_file_name = '../test-superclassMessages.h'
_expected_receiver_implementation_file_name = '../testMessageReceiver.cpp'
_expected_legacy_receiver_implementation_file_name = '../test-legacyMessageReceiver.cpp'
_expected_superclass_receiver_implementation_file_name = '../test-superclassMessageReceiver.cpp'
_expected_results = {
'name': 'WebPage',
'conditions': ('(ENABLE(WEBKIT2) && (NESTED_MASTER_CONDITION || MASTER_OR && MASTER_AND))'),
'messages': (
{
'name': 'LoadURL',
'parameters': (
('String', 'url'),
),
'conditions': (None),
},
{
'name': 'LoadSomething',
'parameters': (
('String', 'url'),
),
'conditions': ('ENABLE(TOUCH_EVENTS)'),
},
{
'name': 'TouchEvent',
'parameters': (
('WebKit::WebTouchEvent', 'event'),
),
'conditions': ('(ENABLE(TOUCH_EVENTS) && (NESTED_MESSAGE_CONDITION || SOME_OTHER_MESSAGE_CONDITION))'),
},
{
'name': 'AddEvent',
'parameters': (
('WebKit::WebTouchEvent', 'event'),
),
'conditions': ('(ENABLE(TOUCH_EVENTS) && (NESTED_MESSAGE_CONDITION && SOME_OTHER_MESSAGE_CONDITION))'),
},
{
'name': 'LoadSomethingElse',
'parameters': (
('String', 'url'),
),
'conditions': ('ENABLE(TOUCH_EVENTS)'),
},
{
'name': 'DidReceivePolicyDecision',
'parameters': (
('uint64_t', 'frameID'),
('uint64_t', 'listenerID'),
('uint32_t', 'policyAction'),
),
'conditions': (None),
},
{
'name': 'Close',
'parameters': (),
'conditions': (None),
},
{
'name': 'PreferencesDidChange',
'parameters': (
('WebKit::WebPreferencesStore', 'store'),
),
'conditions': (None),
},
{
'name': 'SendDoubleAndFloat',
'parameters': (
('double', 'd'),
('float', 'f'),
),
'conditions': (None),
},
{
'name': 'SendInts',
'parameters': (
('Vector<uint64_t>', 'ints'),
('Vector<Vector<uint64_t>>', 'intVectors')
),
'conditions': (None),
},
{
'name': 'CreatePlugin',
'parameters': (
('uint64_t', 'pluginInstanceID'),
('WebKit::Plugin::Parameters', 'parameters')
),
'reply_parameters': (
('bool', 'result'),
),
'conditions': (None),
},
{
'name': 'RunJavaScriptAlert',
'parameters': (
('uint64_t', 'frameID'),
('String', 'message')
),
'reply_parameters': (),
'conditions': (None),
},
{
'name': 'GetPlugins',
'parameters': (
('bool', 'refresh'),
),
'reply_parameters': (
('Vector<WebCore::PluginInfo>', 'plugins'),
),
'conditions': (None),
},
{
'name': 'GetPluginProcessConnection',
'parameters': (
('String', 'pluginPath'),
),
'reply_parameters': (
('IPC::Connection::Handle', 'connectionHandle'),
),
'conditions': (None),
},
{
'name': 'TestMultipleAttributes',
'parameters': (
),
'reply_parameters': (
),
'conditions': (None),
},
{
'name': 'TestParameterAttributes',
'parameters': (
('uint64_t', 'foo', ('AttributeOne', 'AttributeTwo')),
('double', 'bar'),
('double', 'baz', ('AttributeThree',)),
),
'conditions': (None),
},
{
'name': 'TemplateTest',
'parameters': (
('HashMap<String, std::pair<String, uint64_t>>', 'a'),
),
'conditions': (None),
},
{
'name': 'SetVideoLayerID',
'parameters': (
('WebCore::GraphicsLayer::PlatformLayerID', 'videoLayerID'),
),
'conditions': (None),
},
{
'name': 'DidCreateWebProcessConnection',
'parameters': (
('IPC::MachPort', 'connectionIdentifier'),
('OptionSet<WebKit::SelectionFlags>', 'flags'),
),
'conditions': ('PLATFORM(MAC)'),
},
{
'name': 'InterpretKeyEvent',
'parameters': (
('uint32_t', 'type'),
),
'reply_parameters': (
('Vector<WebCore::KeypressCommand>', 'commandName'),
),
'conditions': ('PLATFORM(MAC)'),
},
{
'name': 'DeprecatedOperation',
'parameters': (
('IPC::DummyType', 'dummy'),
),
'conditions': ('ENABLE(DEPRECATED_FEATURE)'),
},
{
'name': 'ExperimentalOperation',
'parameters': (
('IPC::DummyType', 'dummy'),
),
'conditions': ('ENABLE(EXPERIMENTAL_FEATURE)'),
}
),
}
_expected_superclass_results = {
'name': 'WebPage',
'superclass': 'WebPageBase',
'conditions': None,
'messages': (
{
'name': 'LoadURL',
'parameters': (
('String', 'url'),
),
'conditions': (None),
},
{
'name': 'TestAsyncMessage',
'parameters': (
('WebKit::TestTwoStateEnum', 'twoStateEnum'),
),
'reply_parameters': (
('uint64_t', 'result'),
),
'conditions': ('ENABLE(TEST_FEATURE)'),
},
{
'name': 'TestAsyncMessageWithNoArguments',
'parameters': (),
'reply_parameters': (),
'conditions': ('ENABLE(TEST_FEATURE)'),
},
{
'name': 'TestAsyncMessageWithMultipleArguments',
'parameters': (),
'reply_parameters': (
('bool', 'flag'),
('uint64_t', 'value'),
),
'conditions': ('ENABLE(TEST_FEATURE)'),
},
{
'name': 'TestAsyncMessageWithConnection',
'parameters': (
('int', 'value'),
),
'reply_parameters': (
('bool', 'flag'),
),
'conditions': ('ENABLE(TEST_FEATURE)'),
},
{
'name': 'TestSyncMessage',
'parameters': (
('uint32_t', 'param'),
),
'reply_parameters': (
('uint8_t', 'reply'),
),
'conditions': (None),
},
{
'name': 'TestSynchronousMessage',
'parameters': (
('bool', 'value'),
),
'reply_parameters': (
('Optional<WebKit::TestClassName>', 'optionalReply'),
),
'conditions': (None),
},
),
}
class MessagesTest(unittest.TestCase):
    """Base fixture: parses the three .messages.in inputs for subclasses."""

    def setUp(self):
        # Re-parse the raw text for every test so parsed state is never
        # shared (parsing is cheap; the file contents were read at import).
        self.receiver = parser.parse(StringIO(_messages_file_contents))
        self.legacy_receiver = parser.parse(StringIO(_legacy_messages_file_contents))
        self.superclass_receiver = parser.parse(StringIO(_superclass_messages_file_contents))
class ParsingTest(MessagesTest):
    """Checks that the parsed receivers match the hand-written expectations.

    Uses assertEqual throughout: assertEquals is a deprecated alias that was
    removed in Python 3.12, while assertEqual exists on Python 2.7 as well.
    """

    def check_message(self, message, expected_message):
        """Compare one parsed message against its expected description dict."""
        self.assertEqual(message.name, expected_message['name'])
        self.assertEqual(len(message.parameters), len(expected_message['parameters']))
        for index, parameter in enumerate(message.parameters):
            expected_parameter = expected_message['parameters'][index]
            self.assertEqual(parameter.type, expected_parameter[0])
            self.assertEqual(parameter.name, expected_parameter[1])
            if len(expected_parameter) > 2:
                # A third tuple element lists the expected parameter attributes.
                self.assertEqual(parameter.attributes, frozenset(expected_parameter[2]))
                for attribute in expected_parameter[2]:
                    self.assertTrue(parameter.has_attribute(attribute))
            else:
                self.assertEqual(parameter.attributes, frozenset())
        if message.reply_parameters is not None:
            for index, parameter in enumerate(message.reply_parameters):
                self.assertEqual(parameter.type, expected_message['reply_parameters'][index][0])
                self.assertEqual(parameter.name, expected_message['reply_parameters'][index][1])
        else:
            # No reply parameters parsed: the expectation must not define any.
            self.assertFalse('reply_parameters' in expected_message)
        self.assertEqual(message.condition, expected_message['conditions'])

    def test_receiver(self):
        """Receiver should be parsed as expected"""
        self.assertEqual(self.receiver.name, _expected_results['name'])
        self.assertEqual(self.receiver.condition, _expected_results['conditions'])
        self.assertEqual(len(self.receiver.messages), len(_expected_results['messages']))
        for index, message in enumerate(self.receiver.messages):
            self.check_message(message, _expected_results['messages'][index])

        # The legacy syntax must produce the exact same parse.
        self.assertEqual(self.legacy_receiver.name, _expected_results['name'])
        self.assertEqual(self.legacy_receiver.condition, _expected_results['conditions'])
        self.assertEqual(len(self.legacy_receiver.messages), len(_expected_results['messages']))
        for index, message in enumerate(self.legacy_receiver.messages):
            self.check_message(message, _expected_results['messages'][index])

        self.assertEqual(self.superclass_receiver.name, _expected_superclass_results['name'])
        self.assertEqual(self.superclass_receiver.superclass, _expected_superclass_results['superclass'])
        self.assertEqual(len(self.superclass_receiver.messages), len(_expected_superclass_results['messages']))
        for index, message in enumerate(self.superclass_receiver.messages):
            self.check_message(message, _expected_superclass_results['messages'][index])
class GeneratedFileContentsTest(unittest.TestCase):
    """Helpers comparing generated header/implementation text against the
    checked-in expectation files (or rewriting them under --reset-results).

    assertEquals was replaced with assertEqual: the former is a deprecated
    alias removed in Python 3.12, and assertEqual also exists on Python 2.7.
    """

    def assertGeneratedFileContentsEqual(self, actual_file_contents, expected_file_name):
        try:
            if reset_results:
                # Regenerate the expectation file instead of comparing.
                with open(os.path.join(script_directory, expected_file_name), mode='w') as out_file:
                    out_file.write(actual_file_contents)
                return

            with open(os.path.join(script_directory, expected_file_name), mode='r') as in_file:
                expected_file_contents = in_file.read()
            actual_line_list = actual_file_contents.splitlines(False)
            expected_line_list = expected_file_contents.splitlines(False)

            # Compare line by line first so a failure points at the first
            # differing line; only then check the total line counts agree.
            for index, actual_line in enumerate(actual_line_list):
                self.assertEqual(actual_line, expected_line_list[index])
            self.assertEqual(len(actual_line_list), len(expected_line_list))
        except:
            # Bare except on purpose: annotate ANY failure (assertion errors,
            # missing file, even KeyboardInterrupt) with the file being
            # checked, then re-raise unchanged.
            sys.stderr.write('In expected file %s\n' % expected_file_name)
            raise

    def assertHeaderEqual(self, input_messages_file_contents, expected_file_name):
        """Generate the messages header from raw input and diff it."""
        actual_file_contents = messages.generate_messages_header(parser.parse(StringIO(input_messages_file_contents)))
        self.assertGeneratedFileContentsEqual(actual_file_contents, expected_file_name)

    def assertImplementationEqual(self, input_messages_file_contents, expected_file_name):
        """Generate the receiver implementation from raw input and diff it."""
        actual_file_contents = messages.generate_message_handler(parser.parse(StringIO(input_messages_file_contents)))
        self.assertGeneratedFileContentsEqual(actual_file_contents, expected_file_name)
class HeaderTest(GeneratedFileContentsTest):
    """Generated headers must match the checked-in expectation files."""

    def test_receiver_headers(self):
        # One assertion per input flavour: standard, legacy and superclass.
        self.assertHeaderEqual(_messages_file_contents,
                               _expected_receiver_header_file_name)
        self.assertHeaderEqual(_legacy_messages_file_contents,
                               _expected_legacy_receiver_header_file_name)
        self.assertHeaderEqual(_superclass_messages_file_contents,
                               _expected_superclass_receiver_header_file_name)
class ReceiverImplementationTest(GeneratedFileContentsTest):
    """Generated receiver .cpp files must match the expectation files."""

    def test_receiver_implementations(self):
        # One assertion per input flavour: standard, legacy and superclass.
        self.assertImplementationEqual(_messages_file_contents,
                                       _expected_receiver_implementation_file_name)
        self.assertImplementationEqual(_legacy_messages_file_contents,
                                       _expected_legacy_receiver_implementation_file_name)
        self.assertImplementationEqual(_superclass_messages_file_contents,
                                       _expected_superclass_receiver_implementation_file_name)
class UnsupportedPrecompilerDirectiveTest(unittest.TestCase):
    """#else / #elif are rejected by the generator; verify the error text."""
    # NOTE(review): assertRaisesRegexp is the Python 2 spelling; it is
    # deprecated and removed in Python 3.12. Presumably kept because this
    # file still supports Python 2 (see the StringIO import guard) --
    # switch to assertRaisesRegex once Python 2 support is dropped.

    def test_error_at_else(self):
        with self.assertRaisesRegexp(Exception, r"ERROR: '#else.*' is not supported in the \*\.in files"):
            messages.generate_message_handler(parser.parse(StringIO("asd\n#else bla\nfoo")))

    def test_error_at_elif(self):
        with self.assertRaisesRegexp(Exception, r"ERROR: '#elif.*' is not supported in the \*\.in files"):
            messages.generate_message_handler(parser.parse(StringIO("asd\n#elif bla\nfoo")))
def add_reset_results_to_unittest_help():
    """Splice a description of -r/--reset-results into unittest's usage text.

    Mutates unittest.TestProgram.USAGE in place so `--help` documents the
    custom flag consumed by parse_sys_argv().
    """
    script_name = os.path.basename(__file__)
    reset_results_help = '''
Custom Options:
-r, --reset-results Reset expected results for {0}
'''.format(script_name)
    options_regex = re.compile('^Usage:')
    # splitlines(True) keeps the newline terminators so ''.join reassembles
    # the text exactly.
    lines = unittest.TestProgram.USAGE.splitlines(True)
    index = 0
    for index, line in enumerate(lines):
        if options_regex.match(line) and index + 1 < len(lines):
            # Insert right after the "Usage:" line when one exists.
            lines.insert(index + 1, reset_results_help)
            break
    if index == (len(lines) - 1):
        # No insertion point was found before the last line; append instead.
        lines.append(reset_results_help)
    unittest.TestProgram.USAGE = ''.join(lines)
def parse_sys_argv():
    """Consume a custom -r/--reset-results flag from sys.argv.

    Accepts '-r' or any unambiguous '--' prefix of '--reset-results' (e.g.
    '--r', '--reset'). Sets the module-global `reset_results` and removes
    the flag from sys.argv so unittest.main() never sees it.
    """
    global reset_results
    for index, arg in enumerate(sys.argv[1:]):
        # The previous check used `'--reset-results'.startswith(arg)` on its
        # own, which also matched '', '-' and '--' and silently enabled
        # reset mode; require a real '--xxx' prefix (length > 2) instead.
        if arg == '-r' or (len(arg) > 2 and '--reset-results'.startswith(arg)):
            reset_results = True
            del sys.argv[index + 1]
            break
if __name__ == '__main__':
    # Patch the usage text and strip our custom flag from argv BEFORE
    # unittest.main() parses the command line.
    add_reset_results_to_unittest_help()
    parse_sys_argv()
    unittest.main()
|
import os
from getpass import getpass
from telethon import TelegramClient, ConnectionMode
from telethon.errors import SessionPasswordNeededError
from telethon.tl.types import (
UpdateShortChatMessage, UpdateShortMessage, PeerChat
)
from telethon.utils import get_display_name
def sprint(string, *args, **kwargs):
    """Print that survives terminals unable to encode some characters.

    Falls back to an ASCII-only rendering of *string* when the terminal
    raises UnicodeEncodeError; extra args/kwargs are forwarded to print().
    """
    try:
        print(string, *args, **kwargs)
    except UnicodeEncodeError:
        ascii_safe = string.encode('utf-8', errors='ignore') \
                           .decode('ascii', errors='ignore')
        print(ascii_safe, *args, **kwargs)
def print_title(title):
    """Render *title* between '=' banner lines, preceded by a blank gap."""
    banner = '=={}=='.format('=' * len(title))
    # Clear previous window
    print('\n')
    print(banner)
    # Inlined safe-print (same behavior as sprint): some terminals cannot
    # encode every character in the title.
    middle = '= {} ='.format(title)
    try:
        print(middle)
    except UnicodeEncodeError:
        print(middle.encode('utf-8', errors='ignore')
                    .decode('ascii', errors='ignore'))
    print(banner)
def bytes_to_string(byte_count):
    """Converts a byte count to a human-readable string (in KB, MB...).

    The scale is clamped at TB: the previous version indexed past the end
    of the suffix list (IndexError) for counts >= 1024**5.
    """
    suffixes = [' bytes', 'KB', 'MB', 'GB', 'TB']
    suffix_index = 0
    while byte_count >= 1024 and suffix_index < len(suffixes) - 1:
        byte_count /= 1024
        suffix_index += 1
    return '{:.2f}{}'.format(byte_count, suffixes[suffix_index])
class InteractiveTelegramClient(TelegramClient):
    """Full featured Telegram client, meant to be used on an interactive
    session to see what Telethon is capable off -

    This client allows the user to perform some basic interaction with
    Telegram through Telethon, such as listing dialogs (open chats),
    talking to people, downloading media, and receiving updates.
    """

    def __init__(self, session_user_id, user_phone, api_id, api_hash,
                 proxy=None):
        """Connect (and sign in if needed) synchronously during construction."""
        print_title('Initialization')

        print('Initializing interactive example...')
        super().__init__(
            session_user_id, api_id, api_hash,
            connection_mode=ConnectionMode.TCP_ABRIDGED,
            proxy=proxy,
            update_workers=1
        )

        # Store all the found media in memory here,
        # so it can be downloaded if the user wants
        self.found_media = set()

        print('Connecting to Telegram servers...')
        if not self.connect():
            print('Initial connection failed. Retrying...')
            if not self.connect():
                print('Could not connect to Telegram servers.')
                # NOTE(review): returning leaves a half-initialized client;
                # callers presumably check connectivity before run().
                return

        # Then, ensure we're authorized and have access
        if not self.is_user_authorized():
            print('First run. Sending code request...')
            self.send_code_request(user_phone)

            self_user = None
            while self_user is None:
                code = input('Enter the code you just received: ')
                try:
                    self_user = self.sign_in(user_phone, code)

                # Two-step verification may be enabled
                except SessionPasswordNeededError:
                    pw = getpass('Two step verification is enabled. '
                                 'Please enter your password: ')
                    self_user = self.sign_in(password=pw)

    def run(self):
        """Interactive main loop: pick a dialog, then chat until quit."""
        # Listen for updates
        self.add_update_handler(self.update_handler)

        # Enter a while loop to chat as long as the user wants
        while True:
            # Retrieve the top dialogs
            dialog_count = 15

            # Entities represent the user, chat or channel
            # corresponding to the dialog on the same index
            dialogs, entities = self.get_dialogs(limit=dialog_count)

            i = None
            while i is None:
                print_title('Dialogs window')

                # Display them so the user can choose
                for i, entity in enumerate(entities, start=1):
                    sprint('{}. {}'.format(i, get_display_name(entity)))

                # Let the user decide who they want to talk to
                print()
                print('> Who do you want to send messages to?')
                print('> Available commands:')
                print(' !q: Quits the dialogs window and exits.')
                print(' !l: Logs out, terminating this session.')
                print()
                i = input('Enter dialog ID or a command: ')
                if i == '!q':
                    return
                if i == '!l':
                    self.log_out()
                    return

                try:
                    # Convert the 1-based menu index to a 0-based list index.
                    i = int(i if i else 0) - 1
                    # Ensure it is inside the bounds, otherwise retry
                    if not 0 <= i < dialog_count:
                        i = None
                except ValueError:
                    i = None

            # Retrieve the selected user (or chat, or channel)
            entity = entities[i]

            # Show some information
            print_title('Chat with "{}"'.format(get_display_name(entity)))
            print('Available commands:')
            print(' !q: Quits the current chat.')
            print(' !Q: Quits the current chat and exits.')
            print(' !h: prints the latest messages (message History).')
            print(' !up <path>: Uploads and sends the Photo from path.')
            print(' !uf <path>: Uploads and sends the File from path.')
            print(' !d <msg-id>: Deletes a message by its id')
            print(' !dm <msg-id>: Downloads the given message Media (if any).')
            print(' !dp: Downloads the current dialog Profile picture.')
            print()

            # And start a while loop to chat
            while True:
                msg = input('Enter a message: ')
                # Quit
                if msg == '!q':
                    break
                elif msg == '!Q':
                    return

                # History
                elif msg == '!h':
                    # First retrieve the messages and some information
                    total_count, messages, senders = self.get_message_history(
                        entity, limit=10)

                    # Iterate over all (in reverse order so the latest appear
                    # the last in the console) and print them with format:
                    # "[hh:mm] Sender: Message"
                    for msg, sender in zip(
                            reversed(messages), reversed(senders)):
                        # Get the name of the sender if any
                        if sender:
                            name = getattr(sender, 'first_name', None)
                            if not name:
                                name = getattr(sender, 'title')
                                if not name:
                                    name = '???'
                        else:
                            name = '???'

                        # Format the message content
                        if getattr(msg, 'media', None):
                            # Remember the media message so !dm can fetch it.
                            self.found_media.add(msg)
                            # The media may or may not have a caption
                            caption = getattr(msg.media, 'caption', '')
                            content = '<{}> {}'.format(
                                type(msg.media).__name__, caption)

                        elif hasattr(msg, 'message'):
                            content = msg.message
                        elif hasattr(msg, 'action'):
                            content = str(msg.action)
                        else:
                            # Unknown message, simply print its class name
                            content = type(msg).__name__

                        # And print it to the user
                        sprint('[{}:{}] (ID={}) {}: {}'.format(
                            msg.date.hour, msg.date.minute, msg.id, name,
                            content))

                # Send photo
                elif msg.startswith('!up '):
                    # Slice the message to get the path
                    self.send_photo(path=msg[len('!up '):], entity=entity)

                # Send file (document)
                elif msg.startswith('!uf '):
                    # Slice the message to get the path
                    self.send_document(path=msg[len('!uf '):], entity=entity)

                # Delete messages
                elif msg.startswith('!d '):
                    # Slice the message to get message ID
                    deleted_msg = self.delete_messages(entity, msg[len('!d '):])
                    print('Deleted. {}'.format(deleted_msg))

                # Download media
                elif msg.startswith('!dm '):
                    # Slice the message to get message ID
                    self.download_media_by_id(msg[len('!dm '):])

                # Download profile photo
                elif msg == '!dp':
                    print('Downloading profile picture to usermedia/...')
                    os.makedirs('usermedia', exist_ok=True)
                    output = self.download_profile_photo(entity, 'usermedia')
                    if output:
                        print(
                            'Profile picture downloaded to {}'.format(output)
                        )
                    else:
                        print('No profile picture found for this user.')

                # Send chat message (if any)
                elif msg:
                    self.send_message(
                        entity, msg, link_preview=False)

    def send_photo(self, path, entity):
        """Upload the photo at *path* and send it to *entity*."""
        self.send_file(
            entity, path,
            progress_callback=self.upload_progress_callback
        )
        print('Photo sent!')

    def send_document(self, path, entity):
        """Upload *path* as a document (no compression) and send it."""
        self.send_file(
            entity, path,
            force_document=True,
            progress_callback=self.upload_progress_callback
        )
        print('Document sent!')

    def download_media_by_id(self, media_id):
        """Download previously-listed media whose message ID is *media_id*."""
        try:
            # The user may have entered a non-integer string!
            msg_media_id = int(media_id)

            # Search the message ID
            for msg in self.found_media:
                if msg.id == msg_media_id:
                    print('Downloading media to usermedia/...')
                    os.makedirs('usermedia', exist_ok=True)
                    output = self.download_media(
                        msg.media,
                        file='usermedia/',
                        progress_callback=self.download_progress_callback
                    )
                    print('Media downloaded to {}!'.format(output))

        except ValueError:
            print('Invalid media ID given!')

    @staticmethod
    def download_progress_callback(downloaded_bytes, total_bytes):
        # Forwarded to Telethon as the download progress hook.
        InteractiveTelegramClient.print_progress(
            'Downloaded', downloaded_bytes, total_bytes
        )

    @staticmethod
    def upload_progress_callback(uploaded_bytes, total_bytes):
        # Forwarded to Telethon as the upload progress hook.
        InteractiveTelegramClient.print_progress(
            'Uploaded', uploaded_bytes, total_bytes
        )

    @staticmethod
    def print_progress(progress_type, downloaded_bytes, total_bytes):
        """Print one human-readable transfer-progress line."""
        print('{} {} out of {} ({:.2%})'.format(
            progress_type, bytes_to_string(downloaded_bytes),
            bytes_to_string(total_bytes), downloaded_bytes / total_bytes)
        )

    def update_handler(self, update):
        """Print incoming/outgoing short-message updates as they arrive."""
        if isinstance(update, UpdateShortMessage):
            who = self.get_entity(update.user_id)
            if update.out:
                sprint('>> "{}" to user {}'.format(
                    update.message, get_display_name(who)
                ))
            else:
                # NOTE(review): the trailing ']' in the format below looks
                # like a typo in the output format; left untouched since it
                # is a runtime string.
                sprint('<< {} sent "{}"]'.format(
                    get_display_name(who), update.message
                ))

        elif isinstance(update, UpdateShortChatMessage):
            which = self.get_entity(PeerChat(update.chat_id))
            if update.out:
                sprint('>> sent "{}" to chat {}'.format(
                    update.message, get_display_name(which)
                ))
            else:
                who = self.get_entity(update.from_id)
                sprint('<< {} @ {} sent "{}"'.format(
                    get_display_name(which), get_display_name(who),
                    update.message
                ))
|
"""
The point cloud that has the largest number of points
"""
import numpy as np
import torch
import glob
import os
import sys
# Root of the ScanNet data layout on this machine.
scannet_dir = "/home/dtc/Backup/Data/ScanNet"
# path to pth
original_dir = os.path.join(scannet_dir, "Pth/Original")
pth_files = glob.glob(os.path.join(original_dir, "*.pth"))

# Scan every scene and track the one with the largest point count.
n_points_max = 0
name_points_max = ''
for pth_file in pth_files:
    data = torch.load(pth_file)
    # Each .pth holds a (coords, colors, labels) triple; only coords is
    # needed to count points here.
    coords, colors, labels = data
    n_points = len(coords)
    if n_points > n_points_max:
        n_points_max = n_points
        name_points_max = os.path.basename(pth_file)

# Report the winner: file name, then its point count.
print(name_points_max)
print(n_points_max)
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure settings and dispatch to Django's command-line handler."""
    # Default settings module; an explicit environment variable wins.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'weeklyemailapp_1.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while keeping the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# Copyright 2014-2015 RethinkDB, all rights reserved.
import itertools, os, sys, time
try:
xrange
except NameError:
xrange = range
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common'))
import rdb_unittest, utils
# ---
class SquashBase(rdb_unittest.RdbTestCase):
    '''Squash tests'''

    # Local variables -- subclasses override these to parameterize the suite.
    squash = True                     # value passed to changes(squash=...)
    field = "id"                      # field/index the changefeed orders by
    generator = itertools.count()     # source of monotonically increasing values
    records = 0                       # number of records inserted in setUp
    limit = 0                         # changefeed limit()
    multi = False                     # whether `field` is a multi index
    def setUp(self):
        """Populate the table, create the index, and open the feed connection."""
        super(SquashBase, self).setUp()

        # The generator emits values in increasing order thus we store the first twenty
        # values for later use, specifically to do inserts and updates, and test that no
        # change is emitted.
        self._generator_initial_len = 20
        self._generator_initial = []
        for x in xrange(self._generator_initial_len):
            self._generator_initial.append((x, next(self.generator)))

        # The primary key is used to break ties in a multi index thus `self._document`
        # has the option of generating an increasing key instead of them being
        # auto-generated.
        self._key_generator = itertools.count(self._generator_initial_len)

        if self.multi:
            # The generator for multi indices return an array, and the length of that
            # array is a factor in the number of results from a changefeed.
            self._multi_len = len(self._generator_initial[0][1])

        self._primary_key = self.r.db(self.dbName) \
            .table(self.tableName) \
            .info()["primary_key"] \
            .run(self.conn)

        # Generate the records ..
        for x in xrange(self.records):
            self.r.db(self.dbName) \
                .table(self.tableName) \
                .insert(self._document(next(self.generator))) \
                .run(self.conn)

        # .. and add the requested index if necessary
        if self.field != self._primary_key:
            self.r.db(self.dbName) \
                .table(self.tableName) \
                .index_create(self.field, multi=self.multi) \
                .run(self.conn)
            self.r.db(self.dbName) \
                .table(self.tableName) \
                .index_wait(self.field) \
                .run(self.conn)

        # The changefeeds are requested through a separate connection
        self._feed_conn = self.r.connect(
            self.cluster[0].host, self.cluster[0].driver_port)
def _document(self, value, key=None, key_generate=None):
# An increasing primary key is automatically added to multi indices as they
# influence sorting.
if key_generate is None:
key_generate = self.multi
document = {
self.field: value
}
if key is None and key_generate:
key = "g-%i" % next(self._key_generator)
if key is not None:
self.assertTrue(self.field != self._primary_key)
document[self._primary_key] = key
return document
    def test_insert(self):
        """An insert above the cutoff appears in the feed; one below does not."""
        query = self.r.db(self.dbName) \
            .table(self.tableName) \
            .order_by(index=self.r.desc(self.field)) \
            .limit(self.limit) \
            .changes(squash=self.squash, include_initial=True)
        with utils.NextWithTimeout(query.run(self._feed_conn), stopOnEmpty=False) as feed:
            # Drain the initial results first.
            changes = min(self.records, self.limit)
            if self.multi:
                changes = min(
                    self.records * self._multi_len, self.limit)
            initial = []
            for x in xrange(changes):
                initial.append(next(feed))

            # If the number of records is greater than the limit then insert a low
            # value and verify it does not show up as a change, due to the `order_by`.
            if self.records >= self.limit:
                _, value = self._generator_initial.pop()
                self.r.db(self.dbName) \
                    .table(self.tableName) \
                    .insert(self._document(value)) \
                    .run(self.conn)
                self.assertRaises(Exception, feed.next)

            # Insert a value and verify it does show up.
            value = next(self.generator)
            document = self._document(value)
            key = self.r.db(self.dbName) \
                .table(self.tableName) \
                .insert(document, return_changes=True) \
                .run(self.conn) \
                .get("generated_keys", [document.get("id", value)])[0]

            # With multi indices a single document may show up multiple times in the
            # changefeed, `changes` calculates the number to verify it does indeed show
            # up the expected number of times.
            changes = 1
            if self.multi:
                changes = min(self._multi_len, self.limit)
            for x in xrange(changes):
                feed_next = next(feed)
                self.assertTrue("old_val" in feed_next)
                self.assertTrue("new_val" in feed_next)

                # It depends on whether the initial limit was fulfilled whether the
                # change has an "old_val" set to `None` or a document.
                if len(initial) + x >= self.limit:
                    # Note that the initial values are ordered descending, hence
                    # the comparison with initial[-(x + 1)]
                    self.assertEqual(
                        feed_next["old_val"][self.field],
                        initial[-(x + 1)]["new_val"][self.field])
                else:
                    self.assertEqual(feed_next["old_val"], None)
                self.assertEqual(feed_next["new_val"]["id"], key)
                self.assertEqual(feed_next["new_val"][self.field], value)
    def test_insert_batch(self):
        """A batch insert larger than the limit only reports the top rows."""
        # FIXME: Python 2.7 has new facilities allowing tests to be skipped, use those
        # when we no longer need to support 2.6
        if self.squash == True:
            # With squash True it might not squash aggressively enough for this to be
            # predictable, skip it
            return

        query = self.r.db(self.dbName) \
            .table(self.tableName) \
            .order_by(index=self.r.desc(self.field)) \
            .limit(self.limit) \
            .changes(squash=self.squash, include_initial=True)
        with utils.NextWithTimeout(query.run(self._feed_conn), stopOnEmpty=False) as feed:
            # Drain the initial results first.
            changes = min(self.records, self.limit)
            if self.multi:
                changes = min(
                    self.records * self._multi_len, self.limit)
            initial = []
            for x in xrange(changes):
                initial.append(next(feed))

            # Insert two more documents than the limit as a single batch, due to the
            # squashing we should never get a change for the first two.
            documents = []
            for x in xrange(self.limit + 2):
                value = next(self.generator)
                if self.field == self._primary_key:
                    documents.append(self._document(value))
                else:
                    documents.append(self._document(value, key=x))

            # A document with duplicate primary key should be ignored as well.
            error = documents[-1].copy()
            error.update({"error": True})
            documents.append(error)

            self.r.db(self.dbName) \
                .table(self.tableName) \
                .insert(documents) \
                .run(self.conn)

            for x in xrange(self.limit):
                feed_next = next(feed)
                self.assertTrue("old_val" in feed_next)
                self.assertTrue("new_val" in feed_next)
                if len(initial) + x >= self.limit:
                    # A displaced row: its old_val must be one of the initial rows.
                    self.assertTrue(
                        feed_next["old_val"] in map(lambda x: x["new_val"], initial))
                else:
                    self.assertEqual(feed_next["old_val"], None)
                # Only the batch minus the two lowest entries may appear, and
                # the duplicate-key document must never surface.
                self.assertTrue(feed_next["new_val"] in documents[2:])
                self.assertTrue(not "error" in feed_next["new_val"])
    def test_delete(self):
        """Deleting a row inside the window emits a change; outside it does not."""
        query = self.r.db(self.dbName) \
            .table(self.tableName) \
            .order_by(index=self.r.desc(self.field)) \
            .limit(self.limit) \
            .changes(squash=self.squash, include_initial=True)
        with utils.NextWithTimeout(query.run(self._feed_conn), stopOnEmpty=False) as feed:
            # Drain the initial results first.
            changes = min(self.records, self.limit)
            if self.multi:
                changes = min(
                    self.records * self._multi_len, self.limit)
            initial = []
            for x in xrange(changes):
                initial.append(next(feed))

            # If the number of records is greater than the limit then insert and
            # subsequently delete a low value, and verify it does not show up as a
            # change because of the `order_by`.
            if self.records >= self.limit:
                _, value = self._generator_initial.pop()
                key = self.r.db(self.dbName) \
                    .table(self.tableName) \
                    .insert(self._document(value), return_changes=True) \
                    .run(self.conn).get("generated_keys", [value])[0]
                self.r.db(self.dbName) \
                    .table(self.tableName) \
                    .get(key) \
                    .delete() \
                    .run(self.conn)

            # In inserting this document we have to do somewhat of a dance to get its
            # primary key as it might either be the field, generated by us because of a
            # multi index, or auto-generated.
            value = next(self.generator)
            document = self._document(value)
            key = self.r.db(self.dbName) \
                .table(self.tableName) \
                .insert(document, return_changes=True) \
                .run(self.conn) \
                .get("generated_keys", [document.get("id", value)])[0]

            # Drain the changes produced by the insert itself.
            changes = 1
            if self.multi:
                changes = min(self._multi_len, self.limit)
            for x in xrange(changes):
                next(feed)

            # With the primary key delete the record again.
            self.r.db(self.dbName) \
                .table(self.tableName) \
                .get(key) \
                .delete() \
                .run(self.conn)
            for x in xrange(changes):
                feed_next = next(feed)
                self.assertTrue("old_val" in feed_next)
                self.assertTrue("new_val" in feed_next)
                # The removed row must sort at or below the deleted document.
                self.assertTrue(feed_next["old_val"][self.field] < value or (
                    feed_next["old_val"][self.field] == value and
                    feed_next["old_val"]["id"] <= key))
                if len(initial) + x < self.limit:
                    self.assertEqual(feed_next["new_val"], None)
    def test_replace_key(self):
        """Updating the indexed field up and then back down emits matching changes."""
        # FIXME: Python 2.7 has new facilities allowing tests to be skipped, use those
        # when we no longer need to support 2.6
        if self.field == self._primary_key:
            # The primary key can not be updated, skip it
            return

        query = self.r.db(self.dbName) \
            .table(self.tableName) \
            .order_by(index=self.r.desc(self.field)) \
            .limit(self.limit) \
            .changes(squash=self.squash, include_initial=True)
        with utils.NextWithTimeout(query.run(self._feed_conn), stopOnEmpty=False) as feed:
            # Drain the initial results first.
            changes = min(self.records, self.limit)
            if self.multi:
                changes = min(
                    self.records * self._multi_len, self.limit)
            initial = []
            for x in xrange(changes):
                initial.append(next(feed))

            # Insert a low value, this may or may not cause changes depending on whether
            # we've had more initial changes than the limit.
            index, value = self._generator_initial.pop()
            document = self._document(value, "g-%i" % index)
            key = self.r.db(self.dbName) \
                .table(self.tableName) \
                .insert(document, return_changes=True) \
                .run(self.conn) \
                .get("generated_keys", [document.get("id", value)])[0]
            changes = 0
            if len(initial) < self.limit:
                changes = 1
                if self.multi:
                    changes = min(self._multi_len, self.limit - len(initial))
            for x in xrange(changes):
                feed_next = next(feed)
                self.assertTrue("old_val" in feed_next)
                self.assertTrue("new_val" in feed_next)
                if len(initial) + x < self.limit:
                    self.assertEqual(feed_next["old_val"], None)
                self.assertEqual(feed_next["new_val"]["id"], key)
                self.assertEqual(feed_next["new_val"][self.field], value)

            # Update the key to a higher value, this should produce a change (or changes
            # in the case of a multi index).
            update = next(self.generator)
            self.r.db(self.dbName) \
                .table(self.tableName) \
                .get(key) \
                .update({
                    self.field: update
                }) \
                .run(self.conn)
            changes = 1
            if self.multi:
                changes = min(self._multi_len, self.limit)
            for x in xrange(changes):
                feed_next = next(feed)
                self.assertTrue("old_val" in feed_next)
                self.assertTrue("new_val" in feed_next)
                self.assertTrue(
                    feed_next["old_val"][self.field] <= feed_next["new_val"][self.field])
                self.assertEqual(feed_next["new_val"]["id"], key)
                self.assertEqual(feed_next["new_val"][self.field], update)

            # Update the key back to the lower value.
            self.r.db(self.dbName) \
                .table(self.tableName) \
                .get(key) \
                .update({
                    self.field: value
                }) \
                .run(self.conn)
            changes = 1
            if self.multi:
                changes = min(self._multi_len, self.limit)
            for x in xrange(changes):
                feed_next = next(feed)
                self.assertTrue("old_val" in feed_next)
                self.assertTrue("new_val" in feed_next)
                self.assertTrue(feed_next["old_val"][self.field] <= update)
                self.assertTrue(feed_next["new_val"][self.field] >= value)
def bare_test_squash_to_nothing_insert_delete(self):
    """Insert-then-delete inside the squash window must emit no change.

    Presumably not collected automatically because of the ``bare_``
    prefix — confirm against the test runner.
    """
    # FIXME: Python 2.7 has new facilities allowing tests to be skipped, use those
    # when we no longer need to support 2.6
    if self.squash == True:
        # This is too unpredictable
        return
    query = self.r.db(self.dbName) \
        .table(self.tableName) \
        .order_by(index=self.r.desc(self.field)) \
        .limit(self.limit) \
        .changes(squash=self.squash, include_initial=False)
    with utils.NextWithTimeout(query.run(self._feed_conn), stopOnEmpty=False) as feed:
        # NOTE(review): ``changes`` is computed but never used in this test —
        # looks like a leftover from the include_initial variant; confirm.
        changes = min(self.records, self.limit)
        if self.multi:
            changes = min(
                self.records * self._multi_len, self.limit)
        # An insert followed by a delete within a two-second squashing period should
        # not lead to a change being emitted.
        value = next(self.generator)
        key = self.r.db(self.dbName) \
            .table(self.tableName) \
            .insert(self._document(value, key_generate=False), return_changes=True) \
            .run(self.conn).get("generated_keys", [value])[0]
        self.r.db(self.dbName) \
            .table(self.tableName) \
            .get(key) \
            .delete() \
            .run(self.conn)
def test_squash_to_nothing_delete_insert(self):
    """Delete-then-reinsert of an initial document must emit no change."""
    # This test is similar to the one above but must be done in a separate function
    # due to timing issues
    # FIXME: Python 2.7 has new facilities allowing tests to be skipped, use those
    # when we no longer need to support 2.6
    if self.squash == True:
        # This is too unpredictable
        return
    query = self.r.db(self.dbName) \
        .table(self.tableName) \
        .order_by(index=self.r.desc(self.field)) \
        .limit(self.limit) \
        .changes(squash=self.squash, include_initial=True)
    with utils.NextWithTimeout(query.run(self._feed_conn), stopOnEmpty=False) as feed:
        # Drain the initial result set first.
        changes = min(self.records, self.limit)
        if self.multi:
            changes = min(
                self.records * self._multi_len, self.limit)
        initial = []
        for x in xrange(changes):
            initial.append(next(feed))
        # As above, deleting and re-inserting a value should not lead to a change
        # being emitted.
        if len(initial):
            self.r.db(self.dbName) \
                .table(self.tableName) \
                .get(initial[0]["new_val"]["id"]) \
                .delete() \
                .run(self.conn)
            self.r.db(self.dbName) \
                .table(self.tableName) \
                .insert(initial[0]["new_val"]) \
                .run(self.conn)
class MultiGenerator(object):
    """Infinite generator of predictable multi-index values.

    Each step yields a three-element list ``[n, n, n]`` with ``n``
    counting up from 3, so the position of a generated document inside a
    sorted changefeed is easy to reason about.
    """

    def __init__(self):
        self._count = itertools.count(3)

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 iteration protocol delegates to the Python 2 spelling.
        return self.next()

    def next(self):
        # This is crafted to be predictable, imagine you have an initial set
        # [
        #     {u'new_val': {u'insert': True, u'multi': [44, -1], u'id': u'g-20'}},
        #     {u'new_val': {u'insert': True, u'multi': [45, -1], u'id': u'g-21'}},
        #     {u'new_val': {u'insert': True, u'multi': [46, -1], u'id': u'g-22'}},
        #     {u'new_val': {u'insert': True, u'multi': [47, -1], u'id': u'g-23'}},
        #     {u'new_val': {u'insert': True, u'multi': [48, -1], u'id': u'g-24'}},
        #     ->
        #     {u'new_val': {u'insert': True, u'multi': [44, -1], u'id': u'g-20'}},
        #     {u'new_val': {u'insert': True, u'multi': [45, -1], u'id': u'g-21'}},
        #     {u'new_val': {u'insert': True, u'multi': [46, -1], u'id': u'g-22'}},
        #     {u'new_val': {u'insert': True, u'multi': [47, -1], u'id': u'g-23'}},
        #     {u'new_val': {u'insert': True, u'multi': [48, -1], u'id': u'g-24'}}
        # ]
        # and want to insert the document
        #     {'insert': True, 'multi': [43, -1], u'id': 'g-19'}.
        #
        # This will get inserted once in the position marked by the arrow, which is
        # hard to calculate or predict.
        #
        # Use the next() builtin rather than the Python-2-only .next()
        # method so this works under both iteration protocols, matching
        # the __next__ delegation above (next() exists since 2.6).
        return [next(self._count)] * 3
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Basic example which iterates through the tasks specified and checks them for offensive
language.
Examples
--------
.. code-block:: shell
parlai detect_offensive_language -t "convai_chitchat" --display-examples True
""" # noqa: E501
from parlai.core.params import ParlaiParser
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task
from parlai.utils.safety import OffensiveStringMatcher, OffensiveLanguageClassifier
from parlai.utils.misc import TimeLogger
import parlai.utils.logging as logging
from parlai.core.script import ParlaiScript, register_script
import random
def setup_args(parser=None):
    """Build (or extend) the command-line parser for the offense check."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Check task for offensive language')
    # Options specific to this script.
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument(
        '--safety',
        type=str,
        default='all',
        choices={'string_matcher', 'classifier', 'all'},
        help='Type of safety detector to apply to messages',
    )
    # Walk the data in order and repeat queries back as the "model".
    parser.set_defaults(datatype='train:ordered', model='repeat_query')
    return parser
def detect(opt, printargs=None, print_parser=None):
    """
    Checks a task for offensive language.

    Iterates over the task named in ``opt``, feeds every text and label
    through the configured safety detector(s), logs running statistics,
    and returns the final world report.

    NOTE(review): ``printargs`` is accepted but never used — confirm it
    is kept only for interface compatibility.
    """
    if print_parser is not None:
        if print_parser is True and isinstance(opt, ParlaiParser):
            print_parser = opt
        elif print_parser is False:
            print_parser = None
    random.seed(42)
    # Create model and assign it to the specified task
    agent = create_agent(opt, requireModelExists=True)
    world = create_task(opt, agent)
    # Instantiate only the detectors requested via --safety.
    if opt['safety'] == 'string_matcher' or opt['safety'] == 'all':
        offensive_string_matcher = OffensiveStringMatcher()
    if opt['safety'] == 'classifier' or opt['safety'] == 'all':
        offensive_classifier = OffensiveLanguageClassifier()
    if print_parser:
        # Show arguments after loading model
        print_parser.opt = agent.opt
        print_parser.print_args()
    log_every_n_secs = opt.get('log_every_n_secs', -1)
    if log_every_n_secs <= 0:
        log_every_n_secs = float('inf')
    log_time = TimeLogger()
    # Running counters; 'bad_words' is reset each parley and holds only the
    # words matched in the current batch of acts.
    stats = {
        'bad_words': [],
        'bad_words_cnt': 0,
        'string_offensive': 0,
        'classifier_offensive': 0,
        'total_offensive': 0,
        'total': 0,
    }

    def report(world, stats):
        # Log percentage statistics so far.
        # NOTE(review): divides by stats['total']; assumes at least one
        # message has been classified before this is called — confirm.
        report = world.report()
        log = {
            'word_offenses': stats['bad_words_cnt'],
            'classifier_offenses%': 100
            * (stats['classifier_offensive'] / stats['total']),
            'string_offenses%': 100 * (stats['string_offensive'] / stats['total']),
            'total_offenses%': 100 * (stats['total_offensive'] / stats['total']),
        }
        text, log = log_time.log(report['exs'], world.num_examples(), log)
        logging.info(text)

    def classify(text, stats):
        # Run one string through the enabled detectors, updating counters.
        offensive = False
        stats['total'] += 1
        if opt['safety'] == 'string_matcher' or opt['safety'] == 'all':
            bad_words = offensive_string_matcher.contains_offensive_language(text)
            if bad_words:
                stats['string_offensive'] += 1
                offensive = True
                stats['bad_words'].append(bad_words)
        if opt['safety'] == 'classifier' or opt['safety'] == 'all':
            if text in offensive_classifier:
                stats['classifier_offensive'] += 1
                offensive = True
        if offensive:
            stats['total_offensive'] += 1

    while not world.epoch_done():
        world.parley()
        stats['bad_words'] = []
        for a in world.acts:
            # Check both the message text and its label(s).
            text = a.get('text', '')
            classify(text, stats)
            labels = a.get('labels', a.get('eval_labels', ''))
            for l in labels:
                classify(l, stats)
        if len(stats['bad_words']) > 0 and opt['display_examples']:
            logging.info(world.display())
            logging.info(
                "Offensive words detected: {}".format(', '.join(stats['bad_words']))
            )
        stats['bad_words_cnt'] += len(stats['bad_words'])
        if log_time.time() > log_every_n_secs:
            report(world, stats)
    if world.epoch_done():
        logging.info("epoch done")
    # Final summary after the full pass.
    report(world, stats)
    return world.report()
@register_script('detect_offensive', hidden=True)
class DetectOffensive(ParlaiScript):
    """ParlAI script wrapper exposing ``detect`` as ``detect_offensive``."""

    @classmethod
    def setup_args(cls):
        return setup_args()

    def run(self):
        return detect(self.opt, print_parser=self.parser)


if __name__ == '__main__':
    DetectOffensive.main()
|
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``marker`` compound property of ``splom`` traces.

    Generated-style plotly validator: the ``data_docs`` string below is
    rendered into user-facing docstrings at runtime, so its content is
    data, not commentary.
    """

    def __init__(self, plotly_name="marker", parent_name="splom", **kwargs):
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.colorscale`. Has an
                effect only if in `marker.color`is set to a
                numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.color`) or the bounds set in
                `marker.cmin` and `marker.cmax` Has an effect
                only if in `marker.color`is set to a numerical
                array. Defaults to `false` when `marker.cmin`
                and `marker.cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.cmin` and/or `marker.cmax` to
                be equidistant to this point. Has an effect
                only if in `marker.color`is set to a numerical
                array. Value should have the same units as in
                `marker.color`. Has no effect when
                `marker.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmax` must be set as well.
            color
                Sets themarkercolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.cmin` and `marker.cmax` if set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`new_plotly.graph_objects.splom.marker.Color
                Bar` instance or dict with compatible
                properties
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.color`is set to a numerical array. The
                colorscale must be an array containing arrays
                mapping a normalized value to an rgb, rgba,
                hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use`marker.cmin` and
                `marker.cmax`. Alternatively, `colorscale` may
                be a palette name string of the following list:
                Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
                ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
                arth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for  color .
            line
                :class:`new_plotly.graph_objects.splom.marker.Line`
                instance or dict with compatible properties
            opacity
                Sets the marker opacity.
            opacitysrc
                Sets the source reference on Chart Studio Cloud
                for  opacity .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.color`is set to a
                numerical array. If true, `marker.cmin` will
                correspond to the last color in the array and
                `marker.cmax` will correspond to the first
                color.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace. Has an effect only if
                in `marker.color`is set to a numerical array.
            size
                Sets the marker size (in px).
            sizemin
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the minimum size (in px)
                of the rendered marker points.
            sizemode
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the rule for which the
                data in `size` is converted to pixels.
            sizeref
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the scale factor used to
                determine the rendered size of marker points.
                Use with `sizemin` and `sizemode`.
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for  size .
            symbol
                Sets the marker symbol type. Adding 100 is
                equivalent to appending "-open" to a symbol
                name. Adding 200 is equivalent to appending
                "-dot" to a symbol name. Adding 300 is
                equivalent to appending "-open-dot" or "dot-
                open" to a symbol name.
            symbolsrc
                Sets the source reference on Chart Studio Cloud
                for  symbol .
""",
            ),
            **kwargs
        )
|
# ----------------------------------------------------------------------------
# Copyright (c) 2020, Meta-Storms development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import qiime2.plugin.model as model
class MetaStormsOTUDatabaseFmt(model.TextFileFormat):
    """Meta-Storms OTU database file format (``.mdb``)."""

    def sniff(self):
        # Not sure what should be done to validate the contents
        return True
class MetaStormsSPDatabaseFmt(model.TextFileFormat):
    """Meta-Storms species-level database file format (``.mdbs``)."""

    def sniff(self):
        # Not sure what should be done to validate the contents
        return True
class MetaStormsFUNCDatabaseFmt(model.TextFileFormat):
    """Meta-Storms functional database file format (``.mdbf``)."""

    def sniff(self):
        # Not sure what should be done to validate the contents
        return True
class MetaStormsSearchResultsFmt(model.TextFileFormat):
    """Meta-Storms search results file format (``query.out``)."""

    def sniff(self):
        # Not sure what should be done to validate the contents
        return True
class MetaStormsMetaResultsFmt(model.TextFileFormat):
    """Meta-Storms metadata results file format (``query.out.meta``)."""

    def sniff(self):
        # Not sure what should be done to validate the contents
        return True
class MetaStormsMNSResultsFmt(model.TextFileFormat):
    """Meta-Storms MNS results file format (``query.out.mns``)."""

    def sniff(self):
        # Not sure what should be done to validate the contents
        return True
# Single-file directory layouts wrapping each format above; the second
# argument fixes the file name used inside each directory format.
MetaStormsOTUDatabaseDirFmt = model.SingleFileDirectoryFormat(
    'MetaStormsOTUDatabaseDirFmt', 'database.mdb', MetaStormsOTUDatabaseFmt)

MetaStormsSPDatabaseDirFmt = model.SingleFileDirectoryFormat(
    'MetaStormsSPDatabaseDirFmt', 'database.mdbs', MetaStormsSPDatabaseFmt)

MetaStormsFUNCDatabaseDirFmt = model.SingleFileDirectoryFormat(
    'MetaStormsFUNCDatabaseDirFmt', 'database.mdbf', MetaStormsFUNCDatabaseFmt)

MetaStormsSearchResultsDirFmt = model.SingleFileDirectoryFormat(
    'MetaStormsSearchResultsDirFmt', 'query.out', MetaStormsSearchResultsFmt)

MetaStormsMetaResultsDirFmt = model.SingleFileDirectoryFormat(
    'MetaStormsMetaResultsDirFmt', 'query.out.meta', MetaStormsMetaResultsFmt)

MetaStormsMNSResultsDirFmt = model.SingleFileDirectoryFormat(
    'MetaStormsMNSResultsDirFmt', 'query.out.mns', MetaStormsMNSResultsFmt)
|
from django.db import models
class Item(models.Model):
    """A single to-do list item."""

    # Free-form text content; both empty form input and NULL are disallowed.
    text = models.TextField(blank=False, null=False)
    # NOTE(review): auto_now updates the date on *every* save; if this field
    # is meant to record when the item was first posted, auto_now_add would
    # be the usual choice — confirm intent before changing.
    date_posted = models.DateField(auto_now=True)

    def __str__(self):  # __unicode__ for Python 2
        # NOTE(review): returns a constant placeholder rather than self.text
        # — confirm whether this is intentional.
        return "item"
|
from .feature_extract import FeatureExtractor
from .intra_speaker_dataset import IntraSpeakerDataset, collate_pseudo
from .noise import WavAug
from .preprocess_dataset import PreprocessDataset
from .utils import *
from .VCTK_split import train_valid_test
|
import py
from rpython.rlib.parsing.ebnfparse import parse_ebnf
from rpython.rlib.parsing.regexparse import parse_regex
from rpython.rlib.parsing.lexer import Lexer, DummyLexer
from rpython.rlib.parsing.deterministic import DFA, LexerError
from rpython.rlib.parsing.tree import Nonterminal, Symbol, RPythonVisitor
from rpython.rlib.parsing.parsing import PackratParser, LazyParseTable, Rule
from rpython.rlib.parsing.parsing import ParseError, ErrorInformation
from rpython.rlib.parsing.regex import StringExpression
from rpython.rlib.rstring import ParseStringOverflowError, ParseStringError
from rpython.rlib.rarithmetic import ovfcheck, string_to_int
from rpython.rlib.rbigint import rbigint
from prolog.interpreter.continuation import Engine
from prolog.interpreter.module import Module
from prolog.interpreter import error
def make_regexes():
    """Return parallel (token_names, token_regexes) for the Prolog lexer.

    ``zip(*regexs)`` transposes the (name, regex) pairs; under Python 2
    this yields two lists which callers unpack.
    """
    regexs = [
        ("VAR", parse_regex("[A-Z_]([a-zA-Z0-9]|_)*|_")),
        ("NUMBER", parse_regex("(0|[1-9][0-9]*)")),
        ("FLOAT", parse_regex("(0|[1-9][0-9]*)(\.[0-9]+)([eE][-+]?[0-9]+)?")),
        ("IGNORE", parse_regex(
            "[ \\n\\t\\r]|(/\\*[^\\*]*(\\*[^/][^\\*]*)*\\*+/)|(%[^\\n]*)")),
        ("ATOM", parse_regex("([a-z]([a-zA-Z0-9]|_)*)|('[^']*')|\[\]|!|\+|\-|\{\}")),
        ("STRING", parse_regex('"[^"]*"')),
        ("(", parse_regex("\(")),
        (")", parse_regex("\)")),
        ("[", parse_regex("\[")),
        ("]", parse_regex("\]")),
        ("{", parse_regex("\{")),
        ("}", parse_regex("\}")),
        (".", parse_regex("\.")),
        ("|", parse_regex("\|")),
    ]
    return zip(*regexs)
# Hand-written core grammar: queries, facts, complex terms, primitive
# expressions and list syntax.  The operator-expression rules (expr1200,
# expr1100, ...) are generated from the operator table by make_all_rules().
basic_rules = [
    Rule('query', [['toplevel_op_expr', '.', 'EOF']]),
    Rule('fact', [['toplevel_op_expr', '.']]),
    Rule('complexterm', [['ATOM', '(', 'toplevel_op_expr', ')'], ['expr']]),
    Rule('expr',
         [['VAR'],
          ['NUMBER'],
          ['+', 'NUMBER'],
          ['-', 'NUMBER'],
          ['FLOAT'],
          ['+', 'FLOAT'],
          ['-', 'FLOAT'],
          ['ATOM'],
          ['STRING'],
          ['(', 'toplevel_op_expr', ')'],
          ['{', 'toplevel_op_expr', '}'],
          ['listexpr'],
          ]),
    Rule('listexpr', [['[', 'listbody', ']']]),
    Rule('listbody',
         [['toplevel_op_expr', '|', 'toplevel_op_expr'],
          ['toplevel_op_expr']])
]
# x: term with priority lower than f
# y: term with priority lower or equal than f
# possible types: xf yf xfx xfy yfx yfy fy fx
# priorities: A > B
#
# binaryops
# (1) xfx: A -> B f B | B
# (2) xfy: A -> B f A | B
# (3) yfx: A -> A f B | B
# (4) yfy: A -> A f A | B
#
# unaryops
# (5) fx: A -> f A | B
# (6) fy: A -> f B | B
# (7) xf: A -> B f | B
# (8) yf: A -> A f | B
def make_default_operations():
    """Return the standard Prolog operator table.

    The result is a list of (priority, [(form, [operator names]), ...])
    tuples, ordered from loosest (1200) to tightest (200) binding; the
    form strings ('xfx', 'fy', ...) are explained in the comment above.
    """
    operations = [
        (1200, [("xfx", ["-->", ":-"]),
                ("fx", [":-", "?-"])]),
        (1150, [("fx", ["meta_predicate"])]),
        (1100, [("xfy", [";"])]),
        (1050, [("xfy", ["->"]),
                ("fx", ["block"])]),
        (1000, [("xfy", [","])]),
        (900, [("fy", ["\\+"]),
               ("fx", ["~"])]),
        (700, [("xfx", ["<", "=", "=..", "=@=", "=:=", "=<", "==", "=\=", ">", "?=",
                        ">=", "@<", "@=<", "@>", "@>=", "\=", "\==", "is"])]),
        (600, [("xfy", [":"])]),
        (500, [("yfx", ["+", "-", "/\\", "\\/", "xor"]),
               ( "fx", ["+", "-", "?", "\\"])]),
        (400, [("yfx", ["*", "/", "//", "<<", ">>", "mod", "rem"])]),
        (200, [("xfx", ["**"]), ("xfy", ["^"])]),
    ]
    return operations
# Materialize the default operator table once at import time; the grammar
# construction below uses it as the default.
default_operations = make_default_operations()

import sys

# Grammar generation and tree transformation recurse deeply on nested
# terms; raise the interpreter's recursion limit accordingly.
sys.setrecursionlimit(10000)
def make_from_form(form, op, x, y):
    """Expand an operator form string ('xfx', 'fy', ...) into a grammar
    expansion, substituting x, y and the operator name for the letters
    'x', 'y' and 'f' respectively; other letters are ignored."""
    substitution = {'x': x, 'y': y, 'f': op}
    return [substitution[letter] for letter in form if letter in substitution]
def make_expansion(y, x, allops):
    """Build every expansion of precedence level y over the operators in
    allops, terminated by the fall-through expansion [x] to the next
    (tighter) precedence level."""
    expansions = [
        make_from_form(form, op, x, y)
        for form, ops in allops
        for op in ops
    ]
    expansions.append([x])
    return expansions
def eliminate_immediate_left_recursion(symbol, expansions):
    """Remove immediate left recursion (A -> A alpha) from a rule.

    Returns (expansions, newexpansions, newsymbol) where the rewritten
    expansions reference a fresh helper nonterminal "extra<symbol>" whose
    expansions are the recursive tails plus an epsilon alternative.
    """
    newsymbol = "extra%s" % (symbol, )
    recursive = []
    nonrecursive = []
    for expansion in expansions:
        if expansion[0] == symbol:
            recursive.append(expansion)
        else:
            nonrecursive.append(expansion)
    # A -> beta extraA   for every non-recursive expansion beta
    expansions = [expansion + [newsymbol] for expansion in nonrecursive]
    # extraA -> alpha extraA | epsilon   for every recursive tail alpha
    newexpansions = [expansion[1:] + [newsymbol] for expansion in recursive]
    newexpansions.append([])
    return expansions, newexpansions, newsymbol
def make_all_rules(standard_rules, operations=None):
    """Generate the full grammar: the hand-written rules plus one pair of
    rules (exprN / extraexprN) per operator precedence level, with
    immediate left recursion eliminated so the packrat parser can handle
    the yfx/yf forms."""
    if operations is None:
        operations = default_operations
    all_rules = standard_rules[:]
    for i in range(len(operations)):
        precedence, allops = operations[i]
        # The loosest level is the toplevel expression; tighter levels
        # are named after their precedence number.
        if i == 0:
            y = "toplevel_op_expr"
        else:
            y = "expr%s" % (precedence, )
        # x is the next-tighter level, or complexterm at the innermost.
        if i != len(operations) - 1:
            x = "expr%s" % (operations[i + 1][0], )
        else:
            x = "complexterm"
        expansions = make_expansion(y, x, allops)
        tup = eliminate_immediate_left_recursion(y, expansions)
        expansions, extra_expansions, extra_symbol = tup
        all_rules.append(Rule(extra_symbol, extra_expansions))
        all_rules.append(Rule(y, expansions))
    return all_rules
def add_necessary_regexs(regexs, names, operations=None):
    """Add one literal-string regex per operator so the lexer recognizes
    operator spellings directly; each lexes as an ATOM token.

    The inputs are copied, not mutated.  Entries are inserted at -1,
    i.e. before the final token — presumably to keep the last regex's
    priority lowest; confirm against the lexer's tie-breaking rules.
    """
    if operations is None:
        operations = default_operations
    regexs = regexs[:]
    names = names[:]
    for precedence, allops in operations:
        for form, ops in allops:
            for op in ops:
                regexs.insert(-1, StringExpression(op))
                names.insert(-1, "ATOM")
    return regexs, names
class PrologParseTable(LazyParseTable):
    """Parse table that lets operator atoms match either the generic ATOM
    terminal or their own literal spelling (e.g. the grammar symbol
    'is')."""

    def terminal_equality(self, symbol, input):
        if input.name == "ATOM":
            # An ATOM token matches the ATOM terminal and also any grammar
            # symbol equal to its source text.
            return symbol == "ATOM" or symbol == input.source
        return symbol == input.name

    def match_symbol(self, i, symbol):
        return LazyParseTable.match_symbol(self, i, symbol)
class PrologPackratParser(PackratParser):
    """PackratParser wired to PrologParseTable; left recursion was already
    eliminated during grammar generation, so the check is skipped."""

    def __init__(self, rules, startsymbol):
        PackratParser.__init__(self, rules, startsymbol, PrologParseTable,
                               check_for_left_recursion=False)
def make_basic_rules():
    """Return the hand-written grammar rules together with the token
    names and regexes of the default lexer."""
    token_names, token_regexs = make_regexes()
    return basic_rules, token_names, token_regexs
def make_parser(basic_rules, names, regexs):
    """Assemble the lexer and the fact/query packrat parsers from the
    grammar rules and the token name/regex lists."""
    real_rules = make_all_rules(basic_rules)
    # for r in real_rules:
    #     print r
    # Teach the lexer the operator spellings before building it.
    regexs, names = add_necessary_regexs(list(regexs), list(names))
    lexer = Lexer(regexs, names, ignore=["IGNORE"])
    parser_fact = PrologPackratParser(real_rules, "fact")
    parser_query = PrologPackratParser(real_rules, "query")
    return lexer, parser_fact, parser_query, basic_rules
def make_all():
    """Build the lexer and both parsers from the default grammar."""
    rules, names, regexs = make_basic_rules()
    return make_parser(rules, names, regexs)
def make_parser_at_runtime(operations):
    """Rebuild only the fact parser for a modified operator table
    (presumably after user operator definitions change — confirm with
    callers)."""
    real_rules = make_all_rules(basic_rules, operations)
    parser_fact = PrologPackratParser(real_rules, "fact")
    return parser_fact
def _dummyfunc(arg, tree, source_string, file_name, similarity=None):
    # Default parse_file callback: ignores the clause tree.  Its return
    # value (the module-level parser_fact) is discarded by _parse_file.
    return parser_fact
def parse_file(s, parser=None, callback=_dummyfunc, arg=None, file_name=None, similarity=None):
    """Tokenize and parse a whole Prolog source string, invoking callback
    on every parsed clause; returns the list of parse trees.

    Lexer and parser failures are re-raised as error.PrologParseError
    carrying a human-readable message and line number.  (Python 2
    ``except X, e`` syntax — this is an RPython/Python 2 code base.)
    """
    if file_name is None:
        file_name = "<unknown>"  # for error messages only
    try:
        return _parse_file(s, parser, callback, arg, file_name, similarity=similarity)
    except ParseError, exc:
        message = exc.nice_error_message(file_name, s)
        lineno = exc.source_pos.lineno
    except LexerError, exc:
        message = exc.nice_error_message(file_name)
        lineno = exc.source_pos.lineno
    raise error.PrologParseError(file_name, lineno, message)
def _parse_file(s, parser, callback, arg, file_name, similarity=None):
    """Split the token stream into clauses at '.' tokens, parse each
    clause, feed it to callback, and return the list of parse trees."""
    tokens = lexer.tokenize(s)
    lines = []
    line = []
    # Group tokens into clauses, each terminated by a '.' token.
    for tok in tokens:
        line.append(tok)
        if tok.name == ".":
            lines.append(line)
            line = []
    if line:
        # Tokens left over after the last '.': unterminated clause.
        pos = tokens[-1].source_pos
        raise ParseError(pos, ErrorInformation(len(tokens) - 1, ["."]))
    if parser is None:
        parser = parser_fact
    trees = []
    for line in lines:
        tree = parser.parse(line, lazy=False)
        callback(arg, tree, s, file_name, similarity)
        trees.append(tree)
    return trees
def parse_query(s):
    """Tokenize and parse a query string.

    NOTE(review): the parse tree is bound to the local ``s`` and then
    discarded — callers receive None.  Use get_query_and_vars() or
    parse_query_term() for the result; confirm this validation-only
    behaviour is intended.
    """
    tokens = lexer.tokenize(s, eof=True)
    s = parser_query.parse(tokens, lazy=False)
def parse_query_term(s):
    """Parse a query string and return just the term, discarding the
    variable-name binding map."""
    return get_query_and_vars(s)[0]
def get_query_and_vars(s):
    """Parse a query string; return (term, varname_to_var) where the dict
    maps source-level variable names to their binding variables."""
    tokens = lexer.tokenize(s, eof=True)
    s = parser_query.parse(tokens, lazy=False)
    builder = TermBuilder()
    query = builder.build_query(s)
    return query, builder.varname_to_var
class OrderTransformer(object):
    """Rewrites the raw parse tree so that the helper 'extraexpr...'
    nonterminals introduced by left-recursion elimination disappear and
    operator applications become ordinary binary nodes."""

    def transform(self, node):
        if isinstance(node, Symbol):
            return node
        # Drop empty nonterminals (the epsilon expansions of the
        # extraexpr rules).
        children = [c for c in node.children
                        if isinstance(c, Symbol) or (
                            isinstance(c, Nonterminal) and len(c.children))]
        if isinstance(node, Nonterminal):
            if len(children) == 1:
                return Nonterminal(
                    node.symbol, [self.transform(children[0])])
            if len(children) == 2 or len(children) == 3:
                left = children[-2]
                right = children[-1]
                if (isinstance(right, Nonterminal) and
                    right.symbol.startswith("extraexpr")):
                    # Fold the trailing extraexpr chain back into a
                    # left-associative tree of binary operator nodes.
                    if len(children) == 2:
                        leftreplacement = self.transform(left)
                    else:
                        leftreplacement = Nonterminal(
                            node.symbol,
                            [self.transform(children[0]),
                             self.transform(left)])
                    children = [leftreplacement,
                                self.transform(right.children[0]),
                                self.transform(right.children[1])]
                    newnode = Nonterminal(node.symbol, children)
                    return self.transform_extra(right, newnode)
        children = [self.transform(child) for child in children]
        return Nonterminal(node.symbol, children)

    def transform_extra(self, extranode, child):
        # Walk the linked chain of extraexpr nodes; ``child`` accumulates
        # the growing left operand.  The first two children of extranode
        # were already consumed by the caller.
        children = [c for c in extranode.children
                        if isinstance(c, Symbol) or (
                            isinstance(c, Nonterminal) and len(c.children))]
        # Strip the "extra" prefix to recover the real rule name.
        symbol = extranode.symbol[5:]
        if len(children) == 2:
            return child
        right = children[2]
        assert isinstance(right, Nonterminal)
        children = [child,
                    self.transform(right.children[0]),
                    self.transform(right.children[1])]
        newnode = Nonterminal(symbol, children)
        return self.transform_extra(right, newnode)
class TermBuilder(RPythonVisitor):
    """Visitor turning the cleaned-up parse tree (see OrderTransformer)
    into interpreter term objects (Callable/Number/Float/BigInt/Var)."""

    def __init__(self):
        # Maps source-level variable names to BindingVar instances,
        # shared within a single clause (reset by build_fact).
        self.varname_to_var = {}

    def build(self, s):
        "NOT_RPYTHON"
        if isinstance(s, list):
            return self.build_many(s)
        return self.build_query(s)

    def build_many(self, trees):
        ot = OrderTransformer()
        facts = []
        for tree in trees:
            s = ot.transform(tree)
            facts.append(self.build_fact(s))
        return facts

    def build_query(self, s):
        ot = OrderTransformer()
        s = ot.transform(s)
        return self.visit(s.children[0])

    def build_fact(self, node):
        # Each clause gets a fresh variable scope.
        self.varname_to_var = {}
        return self.visit(node.children[0])

    def visit(self, node):
        node = self.find_first_interesting(node)
        return self.dispatch(node)

    def general_nonterminal_visit(self, node):
        from prolog.interpreter.term import Callable, Number, Float, BigInt
        children = []
        name = ""
        for child in node.children:
            if isinstance(child, Symbol):
                name = self.general_symbol_visit(child).name()
            else:
                children.append(child)
        children = [self.visit(child) for child in children]
        if len(children) == 1 and (name == "-" or name == "+"):
            # Fold unary +/- applied to a numeric literal into the
            # literal itself.
            if name == "-":
                factor = -1
            else:
                factor = 1
            child = children[0]
            if isinstance(child, Number):
                return Number(factor * child.num)
            if isinstance(child, Float):
                return Float(factor * child.floatval)
            if isinstance(child, BigInt):
                return BigInt(rbigint.fromint(factor).mul(child.value))
        return Callable.build(name, children)

    def build_list(self, node):
        result = []
        while node is not None:
            node = self._build_list(node, result)
        return result

    def _build_list(self, node, result):
        # Append one element to result; returns the remaining tail of a
        # comma-separated sequence, or None when done.
        node = self.find_first_interesting(node)
        if isinstance(node, Nonterminal):
            child = node.children[1]
            if (isinstance(child, Symbol) and
                    node.children[1].additional_info == ","):
                element = self.visit(node.children[0])
                result.append(element)
                return node.children[2]
        result.append(self.visit(node))

    def find_first_interesting(self, node):
        # Skip chains of single-child wrapper nonterminals.
        if isinstance(node, Nonterminal) and len(node.children) == 1:
            return self.find_first_interesting(node.children[0])
        return node

    def general_symbol_visit(self, node):
        from prolog.interpreter.term import Callable
        if node.additional_info.startswith("'"):
            # Quoted atom: strip the quotes and resolve escapes.
            end = len(node.additional_info) - 1
            assert end >= 0
            name = unescape(node.additional_info[1:end])
        else:
            name = node.additional_info
        return Callable.build(name)

    def visit_VAR(self, node):
        from prolog.interpreter.term import BindingVar
        varname = node.additional_info
        if varname == "_":
            # Every '_' is a distinct anonymous variable.
            return BindingVar()
        if varname in self.varname_to_var:
            return self.varname_to_var[varname]
        res = BindingVar()
        self.varname_to_var[varname] = res
        return res

    def visit_NUMBER(self, node):
        from prolog.interpreter.term import Number, BigInt
        s = node.additional_info
        try:
            intval = string_to_int(s)
        except ParseStringOverflowError:  # overflow: promote to bignum
            return BigInt(rbigint.fromdecimalstr(s))
        return Number(intval)

    def visit_FLOAT(self, node):
        from prolog.interpreter.term import Float
        s = node.additional_info
        return Float(float(s))

    def visit_STRING(self, node):
        # Strings become Prolog lists of character codes.
        from prolog.interpreter import helper
        from prolog.interpreter.term import Callable, Number
        from rpython.rlib.runicode import str_decode_utf_8
        info = node.additional_info
        s = info.strip('"')
        s, _ = str_decode_utf_8(s, len(s), 'strict')
        l = [Number(ord(c)) for c in s]
        return helper.wrap_list(l)

    def visit_complexterm(self, node):
        from prolog.interpreter.term import Callable
        name = self.general_symbol_visit(node.children[0]).name()
        children = self.build_list(node.children[2])
        return Callable.build(name, children[:])

    def visit_expr(self, node):
        from prolog.interpreter.term import Number, Float, BigInt
        additional_info = node.children[0].additional_info
        result = self.visit(node.children[1])
        if additional_info == '-':
            # Negate numeric literals in place.
            if isinstance(result, Number):
                return Number(-result.num)
            elif isinstance(result, Float):
                return Float(-result.floatval)
        elif additional_info == "{":
            from prolog.interpreter.term import Callable
            return Callable.build("{}", [result])
        return result

    def visit_listexpr(self, node):
        from prolog.interpreter.term import Callable
        node = node.children[1]
        if len(node.children) == 1:
            # Proper list: the tail is [].
            l = self.build_list(node)
            start = Callable.build("[]")
        else:
            # [Head|Tail] form: explicit tail expression.
            l = self.build_list(node.children[0])
            start = self.visit(node.children[2])
        l.reverse()
        # Build the cons chain from the tail outwards.
        curr = start
        for elt in l:
            curr = Callable.build(".", [elt, curr])
        return curr
# Map of recognized Prolog escape sequences to the characters they denote.
ESCAPES = {
    "\\a": "\a",
    "\\b": "\b",
    "\\f": "\f",
    "\\n": "\n",
    "\\r": "\r",
    "\\t": "\t",
    "\\v": "\v",
    "\\\\": "\\"
}


def unescape(s):
    """Resolve backslash escape sequences inside a quoted-atom body.

    Unknown escapes collapse to the escaped character itself; a lone
    trailing backslash is silently dropped.
    """
    if "\\" not in s:
        return s
    out = []
    i = 0
    n = len(s)
    while i < n:
        c = s[i]
        if c != "\\":
            out.append(c)
            i += 1
        elif i + 1 < n:
            follower = s[i + 1]
            out.append(ESCAPES.get("\\" + follower, follower))
            i += 2
        else:
            # lone trailing backslash: swallowed, matching the original
            i += 1
    return "".join(out)
def get_engine(source, create_files=False, load_system=False, similarity=None, **modules):
    """Build a Prolog Engine, load the given modules, then run ``source``
    in the user module.

    Keyword arguments in ``modules`` map module names to source text.
    When create_files is true the modules are written to disk (and
    removed again afterwards); otherwise they are run as strings.
    """
    from prolog.interpreter.continuation import Engine
    from prolog.interpreter.test.tool import create_file, delete_file
    e = Engine(load_system, similarity=similarity)
    for name, module in modules.iteritems():
        if create_files:
            create_file(name, module)
        else:
            e.runstring(module)
    try:
        # Make sure ``source`` is consulted into the user module, not into
        # whichever module was loaded last above.
        e.modulewrapper.current_module = e.modulewrapper.user_module
        e.runstring(source)
    finally:
        if create_files:
            for name in modules.keys():
                delete_file(name)
    return e
# generated code between this line and its other occurence
parser_fact = PrologPackratParser([Rule('query', [['toplevel_op_expr', '.', 'EOF']]),
Rule('fact', [['toplevel_op_expr', '.']]),
Rule('complexterm', [['ATOM', '(', 'toplevel_op_expr', ')'], ['expr']]),
Rule('expr', [['VAR'], ['NUMBER'], ['+', 'NUMBER'], ['-', 'NUMBER'], ['FLOAT'], ['+', 'FLOAT'], ['-', 'FLOAT'], ['ATOM'], ['STRING'], ['(', 'toplevel_op_expr', ')'], ['{', 'toplevel_op_expr', '}'], ['listexpr']]),
Rule('listexpr', [['[', 'listbody', ']']]),
Rule('listbody', [['toplevel_op_expr', '|', 'toplevel_op_expr'], ['toplevel_op_expr']]),
Rule('extratoplevel_op_expr', [[]]),
Rule('toplevel_op_expr', [['expr1150', '-->', 'expr1150', 'extratoplevel_op_expr'], ['expr1150', ':-', 'expr1150', 'extratoplevel_op_expr'], [':-', 'expr1150', 'extratoplevel_op_expr'], ['?-', 'expr1150', 'extratoplevel_op_expr'], ['expr1150', 'extratoplevel_op_expr']]),
Rule('extraexpr1150', [[]]),
Rule('expr1150', [['meta_predicate', 'expr1100', 'extraexpr1150'], ['expr1100', 'extraexpr1150']]),
Rule('extraexpr1100', [[]]),
Rule('expr1100', [['expr1050', ';', 'expr1100', 'extraexpr1100'], ['expr1050', 'extraexpr1100']]),
Rule('extraexpr1050', [[]]),
Rule('expr1050', [['expr1000', '->', 'expr1050', 'extraexpr1050'], ['block', 'expr1000', 'extraexpr1050'], ['expr1000', 'extraexpr1050']]),
Rule('extraexpr1000', [[]]),
Rule('expr1000', [['expr900', ',', 'expr1000', 'extraexpr1000'], ['expr900', 'extraexpr1000']]),
Rule('extraexpr900', [[]]),
Rule('expr900', [['\\+', 'expr900', 'extraexpr900'], ['~', 'expr700', 'extraexpr900'], ['expr700', 'extraexpr900']]),
Rule('extraexpr700', [[]]),
Rule('expr700', [['expr600', '<', 'expr600', 'extraexpr700'], ['expr600', '=', 'expr600', 'extraexpr700'], ['expr600', '=..', 'expr600', 'extraexpr700'], ['expr600', '=@=', 'expr600', 'extraexpr700'], ['expr600', '=:=', 'expr600', 'extraexpr700'], ['expr600', '=<', 'expr600', 'extraexpr700'], ['expr600', '==', 'expr600', 'extraexpr700'], ['expr600', '=\\=', 'expr600', 'extraexpr700'], ['expr600', '>', 'expr600', 'extraexpr700'], ['expr600', '?=', 'expr600', 'extraexpr700'], ['expr600', '>=', 'expr600', 'extraexpr700'], ['expr600', '@<', 'expr600', 'extraexpr700'], ['expr600', '@=<', 'expr600', 'extraexpr700'], ['expr600', '@>', 'expr600', 'extraexpr700'], ['expr600', '@>=', 'expr600', 'extraexpr700'], ['expr600', '\\=', 'expr600', 'extraexpr700'], ['expr600', '\\==', 'expr600', 'extraexpr700'], ['expr600', 'is', 'expr600', 'extraexpr700'], ['expr600', 'extraexpr700']]),
Rule('extraexpr600', [[]]),
Rule('expr600', [['expr500', ':', 'expr600', 'extraexpr600'], ['expr500', 'extraexpr600']]),
Rule('extraexpr500', [['+', 'expr400', 'extraexpr500'], ['-', 'expr400', 'extraexpr500'], ['/\\', 'expr400', 'extraexpr500'], ['\\/', 'expr400', 'extraexpr500'], ['xor', 'expr400', 'extraexpr500'], []]),
Rule('expr500', [['+', 'expr400', 'extraexpr500'], ['-', 'expr400', 'extraexpr500'], ['?', 'expr400', 'extraexpr500'], ['\\', 'expr400', 'extraexpr500'], ['expr400', 'extraexpr500']]),
Rule('extraexpr400', [['*', 'expr200', 'extraexpr400'], ['/', 'expr200', 'extraexpr400'], ['//', 'expr200', 'extraexpr400'], ['<<', 'expr200', 'extraexpr400'], ['>>', 'expr200', 'extraexpr400'], ['mod', 'expr200', 'extraexpr400'], ['rem', 'expr200', 'extraexpr400'], []]),
Rule('expr400', [['expr200', 'extraexpr400']]),
Rule('extraexpr200', [[]]),
Rule('expr200', [['complexterm', '**', 'complexterm', 'extraexpr200'], ['complexterm', '^', 'expr200', 'extraexpr200'], ['complexterm', 'extraexpr200']])],
'fact')
parser_query = PrologPackratParser([Rule('query', [['toplevel_op_expr', '.', 'EOF']]),
Rule('fact', [['toplevel_op_expr', '.']]),
Rule('complexterm', [['ATOM', '(', 'toplevel_op_expr', ')'], ['expr']]),
Rule('expr', [['VAR'], ['NUMBER'], ['+', 'NUMBER'], ['-', 'NUMBER'], ['FLOAT'], ['+', 'FLOAT'], ['-', 'FLOAT'], ['ATOM'], ['STRING'], ['(', 'toplevel_op_expr', ')'], ['{', 'toplevel_op_expr', '}'], ['listexpr']]),
Rule('listexpr', [['[', 'listbody', ']']]),
Rule('listbody', [['toplevel_op_expr', '|', 'toplevel_op_expr'], ['toplevel_op_expr']]),
Rule('extratoplevel_op_expr', [[]]),
Rule('toplevel_op_expr', [['expr1150', '-->', 'expr1150', 'extratoplevel_op_expr'], ['expr1150', ':-', 'expr1150', 'extratoplevel_op_expr'], [':-', 'expr1150', 'extratoplevel_op_expr'], ['?-', 'expr1150', 'extratoplevel_op_expr'], ['expr1150', 'extratoplevel_op_expr']]),
Rule('extraexpr1150', [[]]),
Rule('expr1150', [['meta_predicate', 'expr1100', 'extraexpr1150'], ['expr1100', 'extraexpr1150']]),
Rule('extraexpr1100', [[]]),
Rule('expr1100', [['expr1050', ';', 'expr1100', 'extraexpr1100'], ['expr1050', 'extraexpr1100']]),
Rule('extraexpr1050', [[]]),
Rule('expr1050', [['expr1000', '->', 'expr1050', 'extraexpr1050'], ['block', 'expr1000', 'extraexpr1050'], ['expr1000', 'extraexpr1050']]),
Rule('extraexpr1000', [[]]),
Rule('expr1000', [['expr900', ',', 'expr1000', 'extraexpr1000'], ['expr900', 'extraexpr1000']]),
Rule('extraexpr900', [[]]),
Rule('expr900', [['\\+', 'expr900', 'extraexpr900'], ['~', 'expr700', 'extraexpr900'], ['expr700', 'extraexpr900']]),
Rule('extraexpr700', [[]]),
Rule('expr700', [['expr600', '<', 'expr600', 'extraexpr700'], ['expr600', '=', 'expr600', 'extraexpr700'], ['expr600', '=..', 'expr600', 'extraexpr700'], ['expr600', '=@=', 'expr600', 'extraexpr700'], ['expr600', '=:=', 'expr600', 'extraexpr700'], ['expr600', '=<', 'expr600', 'extraexpr700'], ['expr600', '==', 'expr600', 'extraexpr700'], ['expr600', '=\\=', 'expr600', 'extraexpr700'], ['expr600', '>', 'expr600', 'extraexpr700'], ['expr600', '?=', 'expr600', 'extraexpr700'], ['expr600', '>=', 'expr600', 'extraexpr700'], ['expr600', '@<', 'expr600', 'extraexpr700'], ['expr600', '@=<', 'expr600', 'extraexpr700'], ['expr600', '@>', 'expr600', 'extraexpr700'], ['expr600', '@>=', 'expr600', 'extraexpr700'], ['expr600', '\\=', 'expr600', 'extraexpr700'], ['expr600', '\\==', 'expr600', 'extraexpr700'], ['expr600', 'is', 'expr600', 'extraexpr700'], ['expr600', 'extraexpr700']]),
Rule('extraexpr600', [[]]),
Rule('expr600', [['expr500', ':', 'expr600', 'extraexpr600'], ['expr500', 'extraexpr600']]),
Rule('extraexpr500', [['+', 'expr400', 'extraexpr500'], ['-', 'expr400', 'extraexpr500'], ['/\\', 'expr400', 'extraexpr500'], ['\\/', 'expr400', 'extraexpr500'], ['xor', 'expr400', 'extraexpr500'], []]),
Rule('expr500', [['+', 'expr400', 'extraexpr500'], ['-', 'expr400', 'extraexpr500'], ['?', 'expr400', 'extraexpr500'], ['\\', 'expr400', 'extraexpr500'], ['expr400', 'extraexpr500']]),
Rule('extraexpr400', [['*', 'expr200', 'extraexpr400'], ['/', 'expr200', 'extraexpr400'], ['//', 'expr200', 'extraexpr400'], ['<<', 'expr200', 'extraexpr400'], ['>>', 'expr200', 'extraexpr400'], ['mod', 'expr200', 'extraexpr400'], ['rem', 'expr200', 'extraexpr400'], []]),
Rule('expr400', [['expr200', 'extraexpr400']]),
Rule('extraexpr200', [[]]),
Rule('expr200', [['complexterm', '**', 'complexterm', 'extraexpr200'], ['complexterm', '^', 'expr200', 'extraexpr200'], ['complexterm', 'extraexpr200']])],
'query')
def recognize(runner, i):
    #auto-generated code, don't edit
    """Run the unrolled tokenizer DFA over ``runner.text`` starting at ``i``.

    Returns the index one past the last character consumed when the scan
    stops in (or at the end of input from) an accepting state, and the
    bitwise complement ``~i`` when no token can be recognized at the
    current position.  Accepting states also record
    ``runner.last_matched_index`` / ``runner.last_matched_state`` so the
    caller can fall back to the longest accepted prefix after a longer
    scan fails.  ``runner.state`` is always updated with the DFA state in
    which the scan stopped.

    NOTE(review): this function is machine-generated (see comment above);
    the DFA transition table duplicated below in the ``DFA(103, {...})``
    literal must stay in sync with this unrolled form.  Do not hand-edit
    individual transitions.
    """
    assert i >= 0
    input = runner.text
    state = 0
    # The DFA is unrolled: one "if state == N" block per state that has
    # outgoing transitions.  States with no such block (dead/final-only
    # states) fall through to the epilogue at the bottom of the loop.
    while 1:
        # state 0: start state -- classify the first character of the token.
        if state == 0:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 0
                return ~i
            if char == '\t':
                state = 1
            elif char == '\n':
                state = 1
            elif char == '\r':
                state = 1
            elif char == ' ':
                state = 1
            elif char == '(':
                state = 2
            elif char == ',':
                state = 3
            elif char == '0':
                state = 4
            elif '1' <= char <= '9':
                state = 5
            elif char == '<':
                state = 6
            elif char == '@':
                state = 7
            elif 'A' <= char <= 'Z':
                state = 8
            elif char == '_':
                state = 8
            elif char == '\\':
                state = 9
            elif 'c' <= char <= 'h':
                state = 10
            elif 's' <= char <= 'w':
                state = 10
            elif 'n' <= char <= 'q':
                state = 10
            elif 'j' <= char <= 'l':
                state = 10
            elif char == 'y':
                state = 10
            elif char == 'z':
                state = 10
            elif char == 'a':
                state = 10
            elif char == 'x':
                state = 11
            elif char == '|':
                state = 12
            elif char == "'":
                state = 13
            elif char == '+':
                state = 14
            elif char == '/':
                state = 15
            elif char == ';':
                state = 16
            elif char == '?':
                state = 17
            elif char == '[':
                state = 18
            elif char == '{':
                state = 19
            elif char == '"':
                state = 20
            elif char == '*':
                state = 21
            elif char == '.':
                state = 22
            elif char == ':':
                state = 23
            elif char == '>':
                state = 24
            elif char == '^':
                state = 25
            elif char == 'b':
                state = 26
            elif char == 'r':
                state = 27
            elif char == '~':
                state = 28
            elif char == '!':
                state = 29
            elif char == '%':
                state = 30
            elif char == ')':
                state = 31
            elif char == '-':
                state = 32
            elif char == '=':
                state = 33
            elif char == ']':
                state = 34
            elif char == 'i':
                state = 35
            elif char == 'm':
                state = 36
            elif char == '}':
                state = 37
            else:
                break
        # state 4: saw "0" -- accepting (integer); "." may start a float.
        if state == 4:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 4
                return i
            if char == '.':
                state = 98
            else:
                break
        # state 5: non-zero integer -- accepting; keeps consuming digits.
        if state == 5:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 5
                return i
            if char == '.':
                state = 98
            elif '0' <= char <= '9':
                state = 5
                continue
            else:
                break
        if state == 6:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 6
                return i
            if char == '<':
                state = 97
            else:
                break
        # state 7: saw "@" -- not accepting on its own; needs =</=<>/>.
        if state == 7:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 7
                return ~i
            if char == '=':
                state = 92
            elif char == '<':
                state = 93
            elif char == '>':
                state = 94
            else:
                break
        # state 8: variable name (starts with uppercase or "_") -- accepting.
        if state == 8:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 8
                return i
            if 'A' <= char <= 'Z':
                state = 8
                continue
            elif 'a' <= char <= 'z':
                state = 8
                continue
            elif '0' <= char <= '9':
                state = 8
                continue
            elif char == '_':
                state = 8
                continue
            else:
                break
        if state == 9:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 9
                return i
            if char == '+':
                state = 88
            elif char == '=':
                state = 89
            elif char == '/':
                state = 90
            else:
                break
        # state 10: atom name (lowercase start) -- accepting; generic
        # identifier continuation state shared by all keyword scanners below.
        if state == 10:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 10
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        # state 11: saw "x" -- either the atom continues generically, or
        # "xo..." heads toward the "xor" keyword (state 86).
        if state == 11:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 11
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'n':
                state = 10
                continue
            elif 'p' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'o':
                state = 86
            else:
                break
        # state 13: inside a single-quoted atom -- consume anything up to
        # the closing "'" (state 29).
        if state == 13:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 13
                return ~i
            if '(' <= char <= '\xff':
                state = 13
                continue
            elif '\x00' <= char <= '&':
                state = 13
                continue
            elif char == "'":
                state = 29
            else:
                break
        # state 15: saw "/" -- accepting, but may begin "/*", "/\" or "//".
        if state == 15:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 15
                return i
            if char == '*':
                state = 80
            elif char == '\\':
                state = 81
            elif char == '/':
                state = 82
            else:
                break
        if state == 17:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 17
                return i
            if char == '=':
                state = 78
            elif char == '-':
                state = 79
            else:
                break
        if state == 18:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 18
                return i
            if char == ']':
                state = 29
            else:
                break
        if state == 19:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 19
                return i
            if char == '}':
                state = 29
            else:
                break
        # state 20: inside a double-quoted string -- consume anything up to
        # the closing '"' (state 77).
        if state == 20:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 20
                return ~i
            if '#' <= char <= '\xff':
                state = 20
                continue
            elif '\x00' <= char <= '!':
                state = 20
                continue
            elif char == '"':
                state = 77
            else:
                break
        if state == 21:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 21
                return i
            if char == '*':
                state = 76
            else:
                break
        if state == 23:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 23
                return i
            if char == '-':
                state = 75
            else:
                break
        if state == 24:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 24
                return i
            if char == '=':
                state = 73
            elif char == '>':
                state = 74
            else:
                break
        # state 26: saw "b" -- heads toward "block" (26->69->70->71->72)
        # or falls back to a generic atom (state 10).
        if state == 26:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 26
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'm' <= char <= 'z':
                state = 10
                continue
            elif 'a' <= char <= 'k':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'l':
                state = 69
            else:
                break
        # state 27: saw "r" -- heads toward "rem" (27->67->68) or a
        # generic atom.
        if state == 27:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 27
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'f' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'a' <= char <= 'd':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'e':
                state = 67
            else:
                break
        # state 30: "%" line comment -- accepting; consumes everything
        # except '\n' (0x0a).
        if state == 30:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 30
                return i
            if '\x0b' <= char <= '\xff':
                state = 30
                continue
            elif '\x00' <= char <= '\t':
                state = 30
                continue
            else:
                break
        if state == 32:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 32
                return i
            if char == '-':
                state = 64
            elif char == '>':
                state = 65
            else:
                break
        # state 33: saw "=" -- accepting; may extend to =@=, =<, =.., =:=,
        # ==, or =\= via the states below.
        if state == 33:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 33
                return i
            if char == '@':
                state = 54
            elif char == '<':
                state = 55
            elif char == '.':
                state = 56
            elif char == ':':
                state = 57
            elif char == '=':
                state = 58
            elif char == '\\':
                state = 59
            else:
                break
        # state 35: saw "i" -- heads toward "is" (state 53) or a generic atom.
        if state == 35:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 35
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'r':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 't' <= char <= 'z':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 's':
                state = 53
            else:
                break
        # state 36: saw "m" -- forks toward "meta_predicate" (via 38) or
        # "mod" (via 39), else generic atom.
        if state == 36:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 36
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'p' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'f' <= char <= 'n':
                state = 10
                continue
            elif 'a' <= char <= 'd':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'e':
                state = 38
            elif char == 'o':
                state = 39
            else:
                break
        if state == 38:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 38
                return i
            if char == 't':
                state = 41
            elif 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 's':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'u' <= char <= 'z':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        if state == 39:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 39
                return i
            if char == 'd':
                state = 40
            elif 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'e' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'a' <= char <= 'c':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        if state == 40:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 40
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        if state == 41:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 41
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'b' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'a':
                state = 42
            else:
                break
        # state 42: "meta" seen -- "_" continues toward "meta_predicate"
        # (state 43); note this branch deliberately does NOT "continue",
        # so control flows straight into the state-43 block below.
        if state == 42:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 42
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 43
            else:
                break
        if state == 43:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 43
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'o':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'q' <= char <= 'z':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'p':
                state = 44
            else:
                break
        if state == 44:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 44
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'q':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 's' <= char <= 'z':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'r':
                state = 45
            else:
                break
        if state == 45:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 45
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'f' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'a' <= char <= 'd':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'e':
                state = 46
            else:
                break
        if state == 46:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 46
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'e' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'a' <= char <= 'c':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'd':
                state = 47
            else:
                break
        if state == 47:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 47
                return i
            if char == 'i':
                state = 48
            elif 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'j' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'a' <= char <= 'h':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        if state == 48:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 48
                return i
            if char == 'c':
                state = 49
            elif 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'd' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == 'a':
                state = 10
                continue
            elif char == 'b':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        if state == 49:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 49
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'b' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'a':
                state = 50
            else:
                break
        if state == 50:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 50
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 's':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'u' <= char <= 'z':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 't':
                state = 51
            else:
                break
        if state == 51:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 51
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'f' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'a' <= char <= 'd':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'e':
                state = 52
            else:
                break
        if state == 52:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 52
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        if state == 53:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 53
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        # states 54/56/57/59: two-char prefixes of =@=, =.., =:=, =\= --
        # not accepting until the final character arrives.
        if state == 54:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 54
                return ~i
            if char == '=':
                state = 63
            else:
                break
        if state == 56:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 56
                return ~i
            if char == '.':
                state = 62
            else:
                break
        if state == 57:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 57
                return ~i
            if char == '=':
                state = 61
            else:
                break
        if state == 59:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 59
                return ~i
            if char == '=':
                state = 60
            else:
                break
        # state 64: "--" seen -- only "-->" (state 66) completes a token.
        if state == 64:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 64
                return ~i
            if char == '>':
                state = 66
            else:
                break
        if state == 67:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 67
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'n' <= char <= 'z':
                state = 10
                continue
            elif 'a' <= char <= 'l':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'm':
                state = 68
            else:
                break
        if state == 68:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 68
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        if state == 69:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 69
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'n':
                state = 10
                continue
            elif 'p' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'o':
                state = 70
            else:
                break
        if state == 70:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 70
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'd' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == 'a':
                state = 10
                continue
            elif char == 'b':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'c':
                state = 71
            else:
                break
        if state == 71:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 71
                return i
            if char == 'k':
                state = 72
            elif 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'l' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 'a' <= char <= 'j':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        if state == 72:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 72
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        # states 80/83/84/85: "/* ... */" block comment.  80 is the body,
        # 83/84 track trailing "*"s, 85 accepts after the closing "*/".
        if state == 80:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 80
                return ~i
            if '+' <= char <= '\xff':
                state = 80
                continue
            elif '\x00' <= char <= ')':
                state = 80
                continue
            elif char == '*':
                state = 83
            else:
                break
        if state == 83:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 83
                return ~i
            if '0' <= char <= '\xff':
                state = 80
                continue
            elif '\x00' <= char <= ')':
                state = 80
                continue
            elif '+' <= char <= '.':
                state = 80
                continue
            elif char == '/':
                state = 1
            elif char == '*':
                state = 84
            else:
                break
        if state == 84:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 84
                return ~i
            if '0' <= char <= '\xff':
                state = 80
                continue
            elif '\x00' <= char <= ')':
                state = 80
                continue
            elif '+' <= char <= '.':
                state = 80
                continue
            elif char == '*':
                state = 83
                continue
            elif char == '/':
                state = 85
            else:
                break
        if state == 85:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 85
                return i
            if '+' <= char <= '\xff':
                state = 80
                continue
            elif '\x00' <= char <= ')':
                state = 80
                continue
            elif char == '*':
                state = 83
                continue
            else:
                break
        # state 86: "xo" seen -- "r" completes "xor" (state 87).
        if state == 86:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 86
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'q':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif 's' <= char <= 'z':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            elif char == 'r':
                state = 87
            else:
                break
        if state == 87:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 87
                return i
            if 'A' <= char <= 'Z':
                state = 10
                continue
            elif 'a' <= char <= 'z':
                state = 10
                continue
            elif '0' <= char <= '9':
                state = 10
                continue
            elif char == '_':
                state = 10
                continue
            else:
                break
        if state == 89:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 89
                return i
            if char == '=':
                state = 91
            else:
                break
        if state == 92:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 92
                return ~i
            if char == '<':
                state = 96
            else:
                break
        if state == 94:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 94
                return i
            if char == '=':
                state = 95
            else:
                break
        # states 98-102: float syntax.  98 requires a digit after the
        # decimal point; 99 is the fraction; 100/101 handle the exponent
        # marker and its optional sign; 102 is the exponent digits.
        if state == 98:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 98
                return ~i
            if '0' <= char <= '9':
                state = 99
            else:
                break
        if state == 99:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 99
                return i
            if '0' <= char <= '9':
                state = 99
                continue
            elif char == 'E':
                state = 100
            elif char == 'e':
                state = 100
            else:
                break
        if state == 100:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 100
                return ~i
            if char == '+':
                state = 101
            elif char == '-':
                state = 101
            elif '0' <= char <= '9':
                state = 102
            else:
                break
        if state == 101:
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 101
                return ~i
            if '0' <= char <= '9':
                state = 102
            else:
                break
        if state == 102:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            try:
                char = input[i]
                i += 1
            except IndexError:
                runner.state = 102
                return i
            if '0' <= char <= '9':
                state = 102
                continue
            else:
                break
        # Epilogue: reached only for states with no outgoing transitions
        # (none of the blocks above matched).  Such states are final:
        # record the match and succeed only if the input is exhausted.
        runner.last_matched_state = state
        runner.last_matched_index = i - 1
        runner.state = state
        if i == len(input):
            return i
        else:
            return ~i
        break
    # A transition failed mid-token ("break" out of the DFA loop); the
    # caller can still recover the longest match via last_matched_*.
    runner.state = state
    return ~i
lexer = DummyLexer(recognize, DFA(103,
{(0, '\t'): 1,
(0, '\n'): 1,
(0, '\r'): 1,
(0, ' '): 1,
(0, '!'): 29,
(0, '"'): 20,
(0, '%'): 30,
(0, "'"): 13,
(0, '('): 2,
(0, ')'): 31,
(0, '*'): 21,
(0, '+'): 14,
(0, ','): 3,
(0, '-'): 32,
(0, '.'): 22,
(0, '/'): 15,
(0, '0'): 4,
(0, '1'): 5,
(0, '2'): 5,
(0, '3'): 5,
(0, '4'): 5,
(0, '5'): 5,
(0, '6'): 5,
(0, '7'): 5,
(0, '8'): 5,
(0, '9'): 5,
(0, ':'): 23,
(0, ';'): 16,
(0, '<'): 6,
(0, '='): 33,
(0, '>'): 24,
(0, '?'): 17,
(0, '@'): 7,
(0, 'A'): 8,
(0, 'B'): 8,
(0, 'C'): 8,
(0, 'D'): 8,
(0, 'E'): 8,
(0, 'F'): 8,
(0, 'G'): 8,
(0, 'H'): 8,
(0, 'I'): 8,
(0, 'J'): 8,
(0, 'K'): 8,
(0, 'L'): 8,
(0, 'M'): 8,
(0, 'N'): 8,
(0, 'O'): 8,
(0, 'P'): 8,
(0, 'Q'): 8,
(0, 'R'): 8,
(0, 'S'): 8,
(0, 'T'): 8,
(0, 'U'): 8,
(0, 'V'): 8,
(0, 'W'): 8,
(0, 'X'): 8,
(0, 'Y'): 8,
(0, 'Z'): 8,
(0, '['): 18,
(0, '\\'): 9,
(0, ']'): 34,
(0, '^'): 25,
(0, '_'): 8,
(0, 'a'): 10,
(0, 'b'): 26,
(0, 'c'): 10,
(0, 'd'): 10,
(0, 'e'): 10,
(0, 'f'): 10,
(0, 'g'): 10,
(0, 'h'): 10,
(0, 'i'): 35,
(0, 'j'): 10,
(0, 'k'): 10,
(0, 'l'): 10,
(0, 'm'): 36,
(0, 'n'): 10,
(0, 'o'): 10,
(0, 'p'): 10,
(0, 'q'): 10,
(0, 'r'): 27,
(0, 's'): 10,
(0, 't'): 10,
(0, 'u'): 10,
(0, 'v'): 10,
(0, 'w'): 10,
(0, 'x'): 11,
(0, 'y'): 10,
(0, 'z'): 10,
(0, '{'): 19,
(0, '|'): 12,
(0, '}'): 37,
(0, '~'): 28,
(4, '.'): 98,
(5, '.'): 98,
(5, '0'): 5,
(5, '1'): 5,
(5, '2'): 5,
(5, '3'): 5,
(5, '4'): 5,
(5, '5'): 5,
(5, '6'): 5,
(5, '7'): 5,
(5, '8'): 5,
(5, '9'): 5,
(6, '<'): 97,
(7, '<'): 93,
(7, '='): 92,
(7, '>'): 94,
(8, '0'): 8,
(8, '1'): 8,
(8, '2'): 8,
(8, '3'): 8,
(8, '4'): 8,
(8, '5'): 8,
(8, '6'): 8,
(8, '7'): 8,
(8, '8'): 8,
(8, '9'): 8,
(8, 'A'): 8,
(8, 'B'): 8,
(8, 'C'): 8,
(8, 'D'): 8,
(8, 'E'): 8,
(8, 'F'): 8,
(8, 'G'): 8,
(8, 'H'): 8,
(8, 'I'): 8,
(8, 'J'): 8,
(8, 'K'): 8,
(8, 'L'): 8,
(8, 'M'): 8,
(8, 'N'): 8,
(8, 'O'): 8,
(8, 'P'): 8,
(8, 'Q'): 8,
(8, 'R'): 8,
(8, 'S'): 8,
(8, 'T'): 8,
(8, 'U'): 8,
(8, 'V'): 8,
(8, 'W'): 8,
(8, 'X'): 8,
(8, 'Y'): 8,
(8, 'Z'): 8,
(8, '_'): 8,
(8, 'a'): 8,
(8, 'b'): 8,
(8, 'c'): 8,
(8, 'd'): 8,
(8, 'e'): 8,
(8, 'f'): 8,
(8, 'g'): 8,
(8, 'h'): 8,
(8, 'i'): 8,
(8, 'j'): 8,
(8, 'k'): 8,
(8, 'l'): 8,
(8, 'm'): 8,
(8, 'n'): 8,
(8, 'o'): 8,
(8, 'p'): 8,
(8, 'q'): 8,
(8, 'r'): 8,
(8, 's'): 8,
(8, 't'): 8,
(8, 'u'): 8,
(8, 'v'): 8,
(8, 'w'): 8,
(8, 'x'): 8,
(8, 'y'): 8,
(8, 'z'): 8,
(9, '+'): 88,
(9, '/'): 90,
(9, '='): 89,
(10, '0'): 10,
(10, '1'): 10,
(10, '2'): 10,
(10, '3'): 10,
(10, '4'): 10,
(10, '5'): 10,
(10, '6'): 10,
(10, '7'): 10,
(10, '8'): 10,
(10, '9'): 10,
(10, 'A'): 10,
(10, 'B'): 10,
(10, 'C'): 10,
(10, 'D'): 10,
(10, 'E'): 10,
(10, 'F'): 10,
(10, 'G'): 10,
(10, 'H'): 10,
(10, 'I'): 10,
(10, 'J'): 10,
(10, 'K'): 10,
(10, 'L'): 10,
(10, 'M'): 10,
(10, 'N'): 10,
(10, 'O'): 10,
(10, 'P'): 10,
(10, 'Q'): 10,
(10, 'R'): 10,
(10, 'S'): 10,
(10, 'T'): 10,
(10, 'U'): 10,
(10, 'V'): 10,
(10, 'W'): 10,
(10, 'X'): 10,
(10, 'Y'): 10,
(10, 'Z'): 10,
(10, '_'): 10,
(10, 'a'): 10,
(10, 'b'): 10,
(10, 'c'): 10,
(10, 'd'): 10,
(10, 'e'): 10,
(10, 'f'): 10,
(10, 'g'): 10,
(10, 'h'): 10,
(10, 'i'): 10,
(10, 'j'): 10,
(10, 'k'): 10,
(10, 'l'): 10,
(10, 'm'): 10,
(10, 'n'): 10,
(10, 'o'): 10,
(10, 'p'): 10,
(10, 'q'): 10,
(10, 'r'): 10,
(10, 's'): 10,
(10, 't'): 10,
(10, 'u'): 10,
(10, 'v'): 10,
(10, 'w'): 10,
(10, 'x'): 10,
(10, 'y'): 10,
(10, 'z'): 10,
(11, '0'): 10,
(11, '1'): 10,
(11, '2'): 10,
(11, '3'): 10,
(11, '4'): 10,
(11, '5'): 10,
(11, '6'): 10,
(11, '7'): 10,
(11, '8'): 10,
(11, '9'): 10,
(11, 'A'): 10,
(11, 'B'): 10,
(11, 'C'): 10,
(11, 'D'): 10,
(11, 'E'): 10,
(11, 'F'): 10,
(11, 'G'): 10,
(11, 'H'): 10,
(11, 'I'): 10,
(11, 'J'): 10,
(11, 'K'): 10,
(11, 'L'): 10,
(11, 'M'): 10,
(11, 'N'): 10,
(11, 'O'): 10,
(11, 'P'): 10,
(11, 'Q'): 10,
(11, 'R'): 10,
(11, 'S'): 10,
(11, 'T'): 10,
(11, 'U'): 10,
(11, 'V'): 10,
(11, 'W'): 10,
(11, 'X'): 10,
(11, 'Y'): 10,
(11, 'Z'): 10,
(11, '_'): 10,
(11, 'a'): 10,
(11, 'b'): 10,
(11, 'c'): 10,
(11, 'd'): 10,
(11, 'e'): 10,
(11, 'f'): 10,
(11, 'g'): 10,
(11, 'h'): 10,
(11, 'i'): 10,
(11, 'j'): 10,
(11, 'k'): 10,
(11, 'l'): 10,
(11, 'm'): 10,
(11, 'n'): 10,
(11, 'o'): 86,
(11, 'p'): 10,
(11, 'q'): 10,
(11, 'r'): 10,
(11, 's'): 10,
(11, 't'): 10,
(11, 'u'): 10,
(11, 'v'): 10,
(11, 'w'): 10,
(11, 'x'): 10,
(11, 'y'): 10,
(11, 'z'): 10,
(13, '\x00'): 13,
(13, '\x01'): 13,
(13, '\x02'): 13,
(13, '\x03'): 13,
(13, '\x04'): 13,
(13, '\x05'): 13,
(13, '\x06'): 13,
(13, '\x07'): 13,
(13, '\x08'): 13,
(13, '\t'): 13,
(13, '\n'): 13,
(13, '\x0b'): 13,
(13, '\x0c'): 13,
(13, '\r'): 13,
(13, '\x0e'): 13,
(13, '\x0f'): 13,
(13, '\x10'): 13,
(13, '\x11'): 13,
(13, '\x12'): 13,
(13, '\x13'): 13,
(13, '\x14'): 13,
(13, '\x15'): 13,
(13, '\x16'): 13,
(13, '\x17'): 13,
(13, '\x18'): 13,
(13, '\x19'): 13,
(13, '\x1a'): 13,
(13, '\x1b'): 13,
(13, '\x1c'): 13,
(13, '\x1d'): 13,
(13, '\x1e'): 13,
(13, '\x1f'): 13,
(13, ' '): 13,
(13, '!'): 13,
(13, '"'): 13,
(13, '#'): 13,
(13, '$'): 13,
(13, '%'): 13,
(13, '&'): 13,
(13, "'"): 29,
(13, '('): 13,
(13, ')'): 13,
(13, '*'): 13,
(13, '+'): 13,
(13, ','): 13,
(13, '-'): 13,
(13, '.'): 13,
(13, '/'): 13,
(13, '0'): 13,
(13, '1'): 13,
(13, '2'): 13,
(13, '3'): 13,
(13, '4'): 13,
(13, '5'): 13,
(13, '6'): 13,
(13, '7'): 13,
(13, '8'): 13,
(13, '9'): 13,
(13, ':'): 13,
(13, ';'): 13,
(13, '<'): 13,
(13, '='): 13,
(13, '>'): 13,
(13, '?'): 13,
(13, '@'): 13,
(13, 'A'): 13,
(13, 'B'): 13,
(13, 'C'): 13,
(13, 'D'): 13,
(13, 'E'): 13,
(13, 'F'): 13,
(13, 'G'): 13,
(13, 'H'): 13,
(13, 'I'): 13,
(13, 'J'): 13,
(13, 'K'): 13,
(13, 'L'): 13,
(13, 'M'): 13,
(13, 'N'): 13,
(13, 'O'): 13,
(13, 'P'): 13,
(13, 'Q'): 13,
(13, 'R'): 13,
(13, 'S'): 13,
(13, 'T'): 13,
(13, 'U'): 13,
(13, 'V'): 13,
(13, 'W'): 13,
(13, 'X'): 13,
(13, 'Y'): 13,
(13, 'Z'): 13,
(13, '['): 13,
(13, '\\'): 13,
(13, ']'): 13,
(13, '^'): 13,
(13, '_'): 13,
(13, '`'): 13,
(13, 'a'): 13,
(13, 'b'): 13,
(13, 'c'): 13,
(13, 'd'): 13,
(13, 'e'): 13,
(13, 'f'): 13,
(13, 'g'): 13,
(13, 'h'): 13,
(13, 'i'): 13,
(13, 'j'): 13,
(13, 'k'): 13,
(13, 'l'): 13,
(13, 'm'): 13,
(13, 'n'): 13,
(13, 'o'): 13,
(13, 'p'): 13,
(13, 'q'): 13,
(13, 'r'): 13,
(13, 's'): 13,
(13, 't'): 13,
(13, 'u'): 13,
(13, 'v'): 13,
(13, 'w'): 13,
(13, 'x'): 13,
(13, 'y'): 13,
(13, 'z'): 13,
(13, '{'): 13,
(13, '|'): 13,
(13, '}'): 13,
(13, '~'): 13,
(13, '\x7f'): 13,
(13, '\x80'): 13,
(13, '\x81'): 13,
(13, '\x82'): 13,
(13, '\x83'): 13,
(13, '\x84'): 13,
(13, '\x85'): 13,
(13, '\x86'): 13,
(13, '\x87'): 13,
(13, '\x88'): 13,
(13, '\x89'): 13,
(13, '\x8a'): 13,
(13, '\x8b'): 13,
(13, '\x8c'): 13,
(13, '\x8d'): 13,
(13, '\x8e'): 13,
(13, '\x8f'): 13,
(13, '\x90'): 13,
(13, '\x91'): 13,
(13, '\x92'): 13,
(13, '\x93'): 13,
(13, '\x94'): 13,
(13, '\x95'): 13,
(13, '\x96'): 13,
(13, '\x97'): 13,
(13, '\x98'): 13,
(13, '\x99'): 13,
(13, '\x9a'): 13,
(13, '\x9b'): 13,
(13, '\x9c'): 13,
(13, '\x9d'): 13,
(13, '\x9e'): 13,
(13, '\x9f'): 13,
(13, '\xa0'): 13,
(13, '\xa1'): 13,
(13, '\xa2'): 13,
(13, '\xa3'): 13,
(13, '\xa4'): 13,
(13, '\xa5'): 13,
(13, '\xa6'): 13,
(13, '\xa7'): 13,
(13, '\xa8'): 13,
(13, '\xa9'): 13,
(13, '\xaa'): 13,
(13, '\xab'): 13,
(13, '\xac'): 13,
(13, '\xad'): 13,
(13, '\xae'): 13,
(13, '\xaf'): 13,
(13, '\xb0'): 13,
(13, '\xb1'): 13,
(13, '\xb2'): 13,
(13, '\xb3'): 13,
(13, '\xb4'): 13,
(13, '\xb5'): 13,
(13, '\xb6'): 13,
(13, '\xb7'): 13,
(13, '\xb8'): 13,
(13, '\xb9'): 13,
(13, '\xba'): 13,
(13, '\xbb'): 13,
(13, '\xbc'): 13,
(13, '\xbd'): 13,
(13, '\xbe'): 13,
(13, '\xbf'): 13,
(13, '\xc0'): 13,
(13, '\xc1'): 13,
(13, '\xc2'): 13,
(13, '\xc3'): 13,
(13, '\xc4'): 13,
(13, '\xc5'): 13,
(13, '\xc6'): 13,
(13, '\xc7'): 13,
(13, '\xc8'): 13,
(13, '\xc9'): 13,
(13, '\xca'): 13,
(13, '\xcb'): 13,
(13, '\xcc'): 13,
(13, '\xcd'): 13,
(13, '\xce'): 13,
(13, '\xcf'): 13,
(13, '\xd0'): 13,
(13, '\xd1'): 13,
(13, '\xd2'): 13,
(13, '\xd3'): 13,
(13, '\xd4'): 13,
(13, '\xd5'): 13,
(13, '\xd6'): 13,
(13, '\xd7'): 13,
(13, '\xd8'): 13,
(13, '\xd9'): 13,
(13, '\xda'): 13,
(13, '\xdb'): 13,
(13, '\xdc'): 13,
(13, '\xdd'): 13,
(13, '\xde'): 13,
(13, '\xdf'): 13,
(13, '\xe0'): 13,
(13, '\xe1'): 13,
(13, '\xe2'): 13,
(13, '\xe3'): 13,
(13, '\xe4'): 13,
(13, '\xe5'): 13,
(13, '\xe6'): 13,
(13, '\xe7'): 13,
(13, '\xe8'): 13,
(13, '\xe9'): 13,
(13, '\xea'): 13,
(13, '\xeb'): 13,
(13, '\xec'): 13,
(13, '\xed'): 13,
(13, '\xee'): 13,
(13, '\xef'): 13,
(13, '\xf0'): 13,
(13, '\xf1'): 13,
(13, '\xf2'): 13,
(13, '\xf3'): 13,
(13, '\xf4'): 13,
(13, '\xf5'): 13,
(13, '\xf6'): 13,
(13, '\xf7'): 13,
(13, '\xf8'): 13,
(13, '\xf9'): 13,
(13, '\xfa'): 13,
(13, '\xfb'): 13,
(13, '\xfc'): 13,
(13, '\xfd'): 13,
(13, '\xfe'): 13,
(13, '\xff'): 13,
(15, '*'): 80,
(15, '/'): 82,
(15, '\\'): 81,
(17, '-'): 79,
(17, '='): 78,
(18, ']'): 29,
(19, '}'): 29,
(20, '\x00'): 20,
(20, '\x01'): 20,
(20, '\x02'): 20,
(20, '\x03'): 20,
(20, '\x04'): 20,
(20, '\x05'): 20,
(20, '\x06'): 20,
(20, '\x07'): 20,
(20, '\x08'): 20,
(20, '\t'): 20,
(20, '\n'): 20,
(20, '\x0b'): 20,
(20, '\x0c'): 20,
(20, '\r'): 20,
(20, '\x0e'): 20,
(20, '\x0f'): 20,
(20, '\x10'): 20,
(20, '\x11'): 20,
(20, '\x12'): 20,
(20, '\x13'): 20,
(20, '\x14'): 20,
(20, '\x15'): 20,
(20, '\x16'): 20,
(20, '\x17'): 20,
(20, '\x18'): 20,
(20, '\x19'): 20,
(20, '\x1a'): 20,
(20, '\x1b'): 20,
(20, '\x1c'): 20,
(20, '\x1d'): 20,
(20, '\x1e'): 20,
(20, '\x1f'): 20,
(20, ' '): 20,
(20, '!'): 20,
(20, '"'): 77,
(20, '#'): 20,
(20, '$'): 20,
(20, '%'): 20,
(20, '&'): 20,
(20, "'"): 20,
(20, '('): 20,
(20, ')'): 20,
(20, '*'): 20,
(20, '+'): 20,
(20, ','): 20,
(20, '-'): 20,
(20, '.'): 20,
(20, '/'): 20,
(20, '0'): 20,
(20, '1'): 20,
(20, '2'): 20,
(20, '3'): 20,
(20, '4'): 20,
(20, '5'): 20,
(20, '6'): 20,
(20, '7'): 20,
(20, '8'): 20,
(20, '9'): 20,
(20, ':'): 20,
(20, ';'): 20,
(20, '<'): 20,
(20, '='): 20,
(20, '>'): 20,
(20, '?'): 20,
(20, '@'): 20,
(20, 'A'): 20,
(20, 'B'): 20,
(20, 'C'): 20,
(20, 'D'): 20,
(20, 'E'): 20,
(20, 'F'): 20,
(20, 'G'): 20,
(20, 'H'): 20,
(20, 'I'): 20,
(20, 'J'): 20,
(20, 'K'): 20,
(20, 'L'): 20,
(20, 'M'): 20,
(20, 'N'): 20,
(20, 'O'): 20,
(20, 'P'): 20,
(20, 'Q'): 20,
(20, 'R'): 20,
(20, 'S'): 20,
(20, 'T'): 20,
(20, 'U'): 20,
(20, 'V'): 20,
(20, 'W'): 20,
(20, 'X'): 20,
(20, 'Y'): 20,
(20, 'Z'): 20,
(20, '['): 20,
(20, '\\'): 20,
(20, ']'): 20,
(20, '^'): 20,
(20, '_'): 20,
(20, '`'): 20,
(20, 'a'): 20,
(20, 'b'): 20,
(20, 'c'): 20,
(20, 'd'): 20,
(20, 'e'): 20,
(20, 'f'): 20,
(20, 'g'): 20,
(20, 'h'): 20,
(20, 'i'): 20,
(20, 'j'): 20,
(20, 'k'): 20,
(20, 'l'): 20,
(20, 'm'): 20,
(20, 'n'): 20,
(20, 'o'): 20,
(20, 'p'): 20,
(20, 'q'): 20,
(20, 'r'): 20,
(20, 's'): 20,
(20, 't'): 20,
(20, 'u'): 20,
(20, 'v'): 20,
(20, 'w'): 20,
(20, 'x'): 20,
(20, 'y'): 20,
(20, 'z'): 20,
(20, '{'): 20,
(20, '|'): 20,
(20, '}'): 20,
(20, '~'): 20,
(20, '\x7f'): 20,
(20, '\x80'): 20,
(20, '\x81'): 20,
(20, '\x82'): 20,
(20, '\x83'): 20,
(20, '\x84'): 20,
(20, '\x85'): 20,
(20, '\x86'): 20,
(20, '\x87'): 20,
(20, '\x88'): 20,
(20, '\x89'): 20,
(20, '\x8a'): 20,
(20, '\x8b'): 20,
(20, '\x8c'): 20,
(20, '\x8d'): 20,
(20, '\x8e'): 20,
(20, '\x8f'): 20,
(20, '\x90'): 20,
(20, '\x91'): 20,
(20, '\x92'): 20,
(20, '\x93'): 20,
(20, '\x94'): 20,
(20, '\x95'): 20,
(20, '\x96'): 20,
(20, '\x97'): 20,
(20, '\x98'): 20,
(20, '\x99'): 20,
(20, '\x9a'): 20,
(20, '\x9b'): 20,
(20, '\x9c'): 20,
(20, '\x9d'): 20,
(20, '\x9e'): 20,
(20, '\x9f'): 20,
(20, '\xa0'): 20,
(20, '\xa1'): 20,
(20, '\xa2'): 20,
(20, '\xa3'): 20,
(20, '\xa4'): 20,
(20, '\xa5'): 20,
(20, '\xa6'): 20,
(20, '\xa7'): 20,
(20, '\xa8'): 20,
(20, '\xa9'): 20,
(20, '\xaa'): 20,
(20, '\xab'): 20,
(20, '\xac'): 20,
(20, '\xad'): 20,
(20, '\xae'): 20,
(20, '\xaf'): 20,
(20, '\xb0'): 20,
(20, '\xb1'): 20,
(20, '\xb2'): 20,
(20, '\xb3'): 20,
(20, '\xb4'): 20,
(20, '\xb5'): 20,
(20, '\xb6'): 20,
(20, '\xb7'): 20,
(20, '\xb8'): 20,
(20, '\xb9'): 20,
(20, '\xba'): 20,
(20, '\xbb'): 20,
(20, '\xbc'): 20,
(20, '\xbd'): 20,
(20, '\xbe'): 20,
(20, '\xbf'): 20,
(20, '\xc0'): 20,
(20, '\xc1'): 20,
(20, '\xc2'): 20,
(20, '\xc3'): 20,
(20, '\xc4'): 20,
(20, '\xc5'): 20,
(20, '\xc6'): 20,
(20, '\xc7'): 20,
(20, '\xc8'): 20,
(20, '\xc9'): 20,
(20, '\xca'): 20,
(20, '\xcb'): 20,
(20, '\xcc'): 20,
(20, '\xcd'): 20,
(20, '\xce'): 20,
(20, '\xcf'): 20,
(20, '\xd0'): 20,
(20, '\xd1'): 20,
(20, '\xd2'): 20,
(20, '\xd3'): 20,
(20, '\xd4'): 20,
(20, '\xd5'): 20,
(20, '\xd6'): 20,
(20, '\xd7'): 20,
(20, '\xd8'): 20,
(20, '\xd9'): 20,
(20, '\xda'): 20,
(20, '\xdb'): 20,
(20, '\xdc'): 20,
(20, '\xdd'): 20,
(20, '\xde'): 20,
(20, '\xdf'): 20,
(20, '\xe0'): 20,
(20, '\xe1'): 20,
(20, '\xe2'): 20,
(20, '\xe3'): 20,
(20, '\xe4'): 20,
(20, '\xe5'): 20,
(20, '\xe6'): 20,
(20, '\xe7'): 20,
(20, '\xe8'): 20,
(20, '\xe9'): 20,
(20, '\xea'): 20,
(20, '\xeb'): 20,
(20, '\xec'): 20,
(20, '\xed'): 20,
(20, '\xee'): 20,
(20, '\xef'): 20,
(20, '\xf0'): 20,
(20, '\xf1'): 20,
(20, '\xf2'): 20,
(20, '\xf3'): 20,
(20, '\xf4'): 20,
(20, '\xf5'): 20,
(20, '\xf6'): 20,
(20, '\xf7'): 20,
(20, '\xf8'): 20,
(20, '\xf9'): 20,
(20, '\xfa'): 20,
(20, '\xfb'): 20,
(20, '\xfc'): 20,
(20, '\xfd'): 20,
(20, '\xfe'): 20,
(20, '\xff'): 20,
(21, '*'): 76,
(23, '-'): 75,
(24, '='): 73,
(24, '>'): 74,
(26, '0'): 10,
(26, '1'): 10,
(26, '2'): 10,
(26, '3'): 10,
(26, '4'): 10,
(26, '5'): 10,
(26, '6'): 10,
(26, '7'): 10,
(26, '8'): 10,
(26, '9'): 10,
(26, 'A'): 10,
(26, 'B'): 10,
(26, 'C'): 10,
(26, 'D'): 10,
(26, 'E'): 10,
(26, 'F'): 10,
(26, 'G'): 10,
(26, 'H'): 10,
(26, 'I'): 10,
(26, 'J'): 10,
(26, 'K'): 10,
(26, 'L'): 10,
(26, 'M'): 10,
(26, 'N'): 10,
(26, 'O'): 10,
(26, 'P'): 10,
(26, 'Q'): 10,
(26, 'R'): 10,
(26, 'S'): 10,
(26, 'T'): 10,
(26, 'U'): 10,
(26, 'V'): 10,
(26, 'W'): 10,
(26, 'X'): 10,
(26, 'Y'): 10,
(26, 'Z'): 10,
(26, '_'): 10,
(26, 'a'): 10,
(26, 'b'): 10,
(26, 'c'): 10,
(26, 'd'): 10,
(26, 'e'): 10,
(26, 'f'): 10,
(26, 'g'): 10,
(26, 'h'): 10,
(26, 'i'): 10,
(26, 'j'): 10,
(26, 'k'): 10,
(26, 'l'): 69,
(26, 'm'): 10,
(26, 'n'): 10,
(26, 'o'): 10,
(26, 'p'): 10,
(26, 'q'): 10,
(26, 'r'): 10,
(26, 's'): 10,
(26, 't'): 10,
(26, 'u'): 10,
(26, 'v'): 10,
(26, 'w'): 10,
(26, 'x'): 10,
(26, 'y'): 10,
(26, 'z'): 10,
(27, '0'): 10,
(27, '1'): 10,
(27, '2'): 10,
(27, '3'): 10,
(27, '4'): 10,
(27, '5'): 10,
(27, '6'): 10,
(27, '7'): 10,
(27, '8'): 10,
(27, '9'): 10,
(27, 'A'): 10,
(27, 'B'): 10,
(27, 'C'): 10,
(27, 'D'): 10,
(27, 'E'): 10,
(27, 'F'): 10,
(27, 'G'): 10,
(27, 'H'): 10,
(27, 'I'): 10,
(27, 'J'): 10,
(27, 'K'): 10,
(27, 'L'): 10,
(27, 'M'): 10,
(27, 'N'): 10,
(27, 'O'): 10,
(27, 'P'): 10,
(27, 'Q'): 10,
(27, 'R'): 10,
(27, 'S'): 10,
(27, 'T'): 10,
(27, 'U'): 10,
(27, 'V'): 10,
(27, 'W'): 10,
(27, 'X'): 10,
(27, 'Y'): 10,
(27, 'Z'): 10,
(27, '_'): 10,
(27, 'a'): 10,
(27, 'b'): 10,
(27, 'c'): 10,
(27, 'd'): 10,
(27, 'e'): 67,
(27, 'f'): 10,
(27, 'g'): 10,
(27, 'h'): 10,
(27, 'i'): 10,
(27, 'j'): 10,
(27, 'k'): 10,
(27, 'l'): 10,
(27, 'm'): 10,
(27, 'n'): 10,
(27, 'o'): 10,
(27, 'p'): 10,
(27, 'q'): 10,
(27, 'r'): 10,
(27, 's'): 10,
(27, 't'): 10,
(27, 'u'): 10,
(27, 'v'): 10,
(27, 'w'): 10,
(27, 'x'): 10,
(27, 'y'): 10,
(27, 'z'): 10,
(30, '\x00'): 30,
(30, '\x01'): 30,
(30, '\x02'): 30,
(30, '\x03'): 30,
(30, '\x04'): 30,
(30, '\x05'): 30,
(30, '\x06'): 30,
(30, '\x07'): 30,
(30, '\x08'): 30,
(30, '\t'): 30,
(30, '\x0b'): 30,
(30, '\x0c'): 30,
(30, '\r'): 30,
(30, '\x0e'): 30,
(30, '\x0f'): 30,
(30, '\x10'): 30,
(30, '\x11'): 30,
(30, '\x12'): 30,
(30, '\x13'): 30,
(30, '\x14'): 30,
(30, '\x15'): 30,
(30, '\x16'): 30,
(30, '\x17'): 30,
(30, '\x18'): 30,
(30, '\x19'): 30,
(30, '\x1a'): 30,
(30, '\x1b'): 30,
(30, '\x1c'): 30,
(30, '\x1d'): 30,
(30, '\x1e'): 30,
(30, '\x1f'): 30,
(30, ' '): 30,
(30, '!'): 30,
(30, '"'): 30,
(30, '#'): 30,
(30, '$'): 30,
(30, '%'): 30,
(30, '&'): 30,
(30, "'"): 30,
(30, '('): 30,
(30, ')'): 30,
(30, '*'): 30,
(30, '+'): 30,
(30, ','): 30,
(30, '-'): 30,
(30, '.'): 30,
(30, '/'): 30,
(30, '0'): 30,
(30, '1'): 30,
(30, '2'): 30,
(30, '3'): 30,
(30, '4'): 30,
(30, '5'): 30,
(30, '6'): 30,
(30, '7'): 30,
(30, '8'): 30,
(30, '9'): 30,
(30, ':'): 30,
(30, ';'): 30,
(30, '<'): 30,
(30, '='): 30,
(30, '>'): 30,
(30, '?'): 30,
(30, '@'): 30,
(30, 'A'): 30,
(30, 'B'): 30,
(30, 'C'): 30,
(30, 'D'): 30,
(30, 'E'): 30,
(30, 'F'): 30,
(30, 'G'): 30,
(30, 'H'): 30,
(30, 'I'): 30,
(30, 'J'): 30,
(30, 'K'): 30,
(30, 'L'): 30,
(30, 'M'): 30,
(30, 'N'): 30,
(30, 'O'): 30,
(30, 'P'): 30,
(30, 'Q'): 30,
(30, 'R'): 30,
(30, 'S'): 30,
(30, 'T'): 30,
(30, 'U'): 30,
(30, 'V'): 30,
(30, 'W'): 30,
(30, 'X'): 30,
(30, 'Y'): 30,
(30, 'Z'): 30,
(30, '['): 30,
(30, '\\'): 30,
(30, ']'): 30,
(30, '^'): 30,
(30, '_'): 30,
(30, '`'): 30,
(30, 'a'): 30,
(30, 'b'): 30,
(30, 'c'): 30,
(30, 'd'): 30,
(30, 'e'): 30,
(30, 'f'): 30,
(30, 'g'): 30,
(30, 'h'): 30,
(30, 'i'): 30,
(30, 'j'): 30,
(30, 'k'): 30,
(30, 'l'): 30,
(30, 'm'): 30,
(30, 'n'): 30,
(30, 'o'): 30,
(30, 'p'): 30,
(30, 'q'): 30,
(30, 'r'): 30,
(30, 's'): 30,
(30, 't'): 30,
(30, 'u'): 30,
(30, 'v'): 30,
(30, 'w'): 30,
(30, 'x'): 30,
(30, 'y'): 30,
(30, 'z'): 30,
(30, '{'): 30,
(30, '|'): 30,
(30, '}'): 30,
(30, '~'): 30,
(30, '\x7f'): 30,
(30, '\x80'): 30,
(30, '\x81'): 30,
(30, '\x82'): 30,
(30, '\x83'): 30,
(30, '\x84'): 30,
(30, '\x85'): 30,
(30, '\x86'): 30,
(30, '\x87'): 30,
(30, '\x88'): 30,
(30, '\x89'): 30,
(30, '\x8a'): 30,
(30, '\x8b'): 30,
(30, '\x8c'): 30,
(30, '\x8d'): 30,
(30, '\x8e'): 30,
(30, '\x8f'): 30,
(30, '\x90'): 30,
(30, '\x91'): 30,
(30, '\x92'): 30,
(30, '\x93'): 30,
(30, '\x94'): 30,
(30, '\x95'): 30,
(30, '\x96'): 30,
(30, '\x97'): 30,
(30, '\x98'): 30,
(30, '\x99'): 30,
(30, '\x9a'): 30,
(30, '\x9b'): 30,
(30, '\x9c'): 30,
(30, '\x9d'): 30,
(30, '\x9e'): 30,
(30, '\x9f'): 30,
(30, '\xa0'): 30,
(30, '\xa1'): 30,
(30, '\xa2'): 30,
(30, '\xa3'): 30,
(30, '\xa4'): 30,
(30, '\xa5'): 30,
(30, '\xa6'): 30,
(30, '\xa7'): 30,
(30, '\xa8'): 30,
(30, '\xa9'): 30,
(30, '\xaa'): 30,
(30, '\xab'): 30,
(30, '\xac'): 30,
(30, '\xad'): 30,
(30, '\xae'): 30,
(30, '\xaf'): 30,
(30, '\xb0'): 30,
(30, '\xb1'): 30,
(30, '\xb2'): 30,
(30, '\xb3'): 30,
(30, '\xb4'): 30,
(30, '\xb5'): 30,
(30, '\xb6'): 30,
(30, '\xb7'): 30,
(30, '\xb8'): 30,
(30, '\xb9'): 30,
(30, '\xba'): 30,
(30, '\xbb'): 30,
(30, '\xbc'): 30,
(30, '\xbd'): 30,
(30, '\xbe'): 30,
(30, '\xbf'): 30,
(30, '\xc0'): 30,
(30, '\xc1'): 30,
(30, '\xc2'): 30,
(30, '\xc3'): 30,
(30, '\xc4'): 30,
(30, '\xc5'): 30,
(30, '\xc6'): 30,
(30, '\xc7'): 30,
(30, '\xc8'): 30,
(30, '\xc9'): 30,
(30, '\xca'): 30,
(30, '\xcb'): 30,
(30, '\xcc'): 30,
(30, '\xcd'): 30,
(30, '\xce'): 30,
(30, '\xcf'): 30,
(30, '\xd0'): 30,
(30, '\xd1'): 30,
(30, '\xd2'): 30,
(30, '\xd3'): 30,
(30, '\xd4'): 30,
(30, '\xd5'): 30,
(30, '\xd6'): 30,
(30, '\xd7'): 30,
(30, '\xd8'): 30,
(30, '\xd9'): 30,
(30, '\xda'): 30,
(30, '\xdb'): 30,
(30, '\xdc'): 30,
(30, '\xdd'): 30,
(30, '\xde'): 30,
(30, '\xdf'): 30,
(30, '\xe0'): 30,
(30, '\xe1'): 30,
(30, '\xe2'): 30,
(30, '\xe3'): 30,
(30, '\xe4'): 30,
(30, '\xe5'): 30,
(30, '\xe6'): 30,
(30, '\xe7'): 30,
(30, '\xe8'): 30,
(30, '\xe9'): 30,
(30, '\xea'): 30,
(30, '\xeb'): 30,
(30, '\xec'): 30,
(30, '\xed'): 30,
(30, '\xee'): 30,
(30, '\xef'): 30,
(30, '\xf0'): 30,
(30, '\xf1'): 30,
(30, '\xf2'): 30,
(30, '\xf3'): 30,
(30, '\xf4'): 30,
(30, '\xf5'): 30,
(30, '\xf6'): 30,
(30, '\xf7'): 30,
(30, '\xf8'): 30,
(30, '\xf9'): 30,
(30, '\xfa'): 30,
(30, '\xfb'): 30,
(30, '\xfc'): 30,
(30, '\xfd'): 30,
(30, '\xfe'): 30,
(30, '\xff'): 30,
(32, '-'): 64,
(32, '>'): 65,
(33, '.'): 56,
(33, ':'): 57,
(33, '<'): 55,
(33, '='): 58,
(33, '@'): 54,
(33, '\\'): 59,
(35, '0'): 10,
(35, '1'): 10,
(35, '2'): 10,
(35, '3'): 10,
(35, '4'): 10,
(35, '5'): 10,
(35, '6'): 10,
(35, '7'): 10,
(35, '8'): 10,
(35, '9'): 10,
(35, 'A'): 10,
(35, 'B'): 10,
(35, 'C'): 10,
(35, 'D'): 10,
(35, 'E'): 10,
(35, 'F'): 10,
(35, 'G'): 10,
(35, 'H'): 10,
(35, 'I'): 10,
(35, 'J'): 10,
(35, 'K'): 10,
(35, 'L'): 10,
(35, 'M'): 10,
(35, 'N'): 10,
(35, 'O'): 10,
(35, 'P'): 10,
(35, 'Q'): 10,
(35, 'R'): 10,
(35, 'S'): 10,
(35, 'T'): 10,
(35, 'U'): 10,
(35, 'V'): 10,
(35, 'W'): 10,
(35, 'X'): 10,
(35, 'Y'): 10,
(35, 'Z'): 10,
(35, '_'): 10,
(35, 'a'): 10,
(35, 'b'): 10,
(35, 'c'): 10,
(35, 'd'): 10,
(35, 'e'): 10,
(35, 'f'): 10,
(35, 'g'): 10,
(35, 'h'): 10,
(35, 'i'): 10,
(35, 'j'): 10,
(35, 'k'): 10,
(35, 'l'): 10,
(35, 'm'): 10,
(35, 'n'): 10,
(35, 'o'): 10,
(35, 'p'): 10,
(35, 'q'): 10,
(35, 'r'): 10,
(35, 's'): 53,
(35, 't'): 10,
(35, 'u'): 10,
(35, 'v'): 10,
(35, 'w'): 10,
(35, 'x'): 10,
(35, 'y'): 10,
(35, 'z'): 10,
(36, '0'): 10,
(36, '1'): 10,
(36, '2'): 10,
(36, '3'): 10,
(36, '4'): 10,
(36, '5'): 10,
(36, '6'): 10,
(36, '7'): 10,
(36, '8'): 10,
(36, '9'): 10,
(36, 'A'): 10,
(36, 'B'): 10,
(36, 'C'): 10,
(36, 'D'): 10,
(36, 'E'): 10,
(36, 'F'): 10,
(36, 'G'): 10,
(36, 'H'): 10,
(36, 'I'): 10,
(36, 'J'): 10,
(36, 'K'): 10,
(36, 'L'): 10,
(36, 'M'): 10,
(36, 'N'): 10,
(36, 'O'): 10,
(36, 'P'): 10,
(36, 'Q'): 10,
(36, 'R'): 10,
(36, 'S'): 10,
(36, 'T'): 10,
(36, 'U'): 10,
(36, 'V'): 10,
(36, 'W'): 10,
(36, 'X'): 10,
(36, 'Y'): 10,
(36, 'Z'): 10,
(36, '_'): 10,
(36, 'a'): 10,
(36, 'b'): 10,
(36, 'c'): 10,
(36, 'd'): 10,
(36, 'e'): 38,
(36, 'f'): 10,
(36, 'g'): 10,
(36, 'h'): 10,
(36, 'i'): 10,
(36, 'j'): 10,
(36, 'k'): 10,
(36, 'l'): 10,
(36, 'm'): 10,
(36, 'n'): 10,
(36, 'o'): 39,
(36, 'p'): 10,
(36, 'q'): 10,
(36, 'r'): 10,
(36, 's'): 10,
(36, 't'): 10,
(36, 'u'): 10,
(36, 'v'): 10,
(36, 'w'): 10,
(36, 'x'): 10,
(36, 'y'): 10,
(36, 'z'): 10,
(38, '0'): 10,
(38, '1'): 10,
(38, '2'): 10,
(38, '3'): 10,
(38, '4'): 10,
(38, '5'): 10,
(38, '6'): 10,
(38, '7'): 10,
(38, '8'): 10,
(38, '9'): 10,
(38, 'A'): 10,
(38, 'B'): 10,
(38, 'C'): 10,
(38, 'D'): 10,
(38, 'E'): 10,
(38, 'F'): 10,
(38, 'G'): 10,
(38, 'H'): 10,
(38, 'I'): 10,
(38, 'J'): 10,
(38, 'K'): 10,
(38, 'L'): 10,
(38, 'M'): 10,
(38, 'N'): 10,
(38, 'O'): 10,
(38, 'P'): 10,
(38, 'Q'): 10,
(38, 'R'): 10,
(38, 'S'): 10,
(38, 'T'): 10,
(38, 'U'): 10,
(38, 'V'): 10,
(38, 'W'): 10,
(38, 'X'): 10,
(38, 'Y'): 10,
(38, 'Z'): 10,
(38, '_'): 10,
(38, 'a'): 10,
(38, 'b'): 10,
(38, 'c'): 10,
(38, 'd'): 10,
(38, 'e'): 10,
(38, 'f'): 10,
(38, 'g'): 10,
(38, 'h'): 10,
(38, 'i'): 10,
(38, 'j'): 10,
(38, 'k'): 10,
(38, 'l'): 10,
(38, 'm'): 10,
(38, 'n'): 10,
(38, 'o'): 10,
(38, 'p'): 10,
(38, 'q'): 10,
(38, 'r'): 10,
(38, 's'): 10,
(38, 't'): 41,
(38, 'u'): 10,
(38, 'v'): 10,
(38, 'w'): 10,
(38, 'x'): 10,
(38, 'y'): 10,
(38, 'z'): 10,
(39, '0'): 10,
(39, '1'): 10,
(39, '2'): 10,
(39, '3'): 10,
(39, '4'): 10,
(39, '5'): 10,
(39, '6'): 10,
(39, '7'): 10,
(39, '8'): 10,
(39, '9'): 10,
(39, 'A'): 10,
(39, 'B'): 10,
(39, 'C'): 10,
(39, 'D'): 10,
(39, 'E'): 10,
(39, 'F'): 10,
(39, 'G'): 10,
(39, 'H'): 10,
(39, 'I'): 10,
(39, 'J'): 10,
(39, 'K'): 10,
(39, 'L'): 10,
(39, 'M'): 10,
(39, 'N'): 10,
(39, 'O'): 10,
(39, 'P'): 10,
(39, 'Q'): 10,
(39, 'R'): 10,
(39, 'S'): 10,
(39, 'T'): 10,
(39, 'U'): 10,
(39, 'V'): 10,
(39, 'W'): 10,
(39, 'X'): 10,
(39, 'Y'): 10,
(39, 'Z'): 10,
(39, '_'): 10,
(39, 'a'): 10,
(39, 'b'): 10,
(39, 'c'): 10,
(39, 'd'): 40,
(39, 'e'): 10,
(39, 'f'): 10,
(39, 'g'): 10,
(39, 'h'): 10,
(39, 'i'): 10,
(39, 'j'): 10,
(39, 'k'): 10,
(39, 'l'): 10,
(39, 'm'): 10,
(39, 'n'): 10,
(39, 'o'): 10,
(39, 'p'): 10,
(39, 'q'): 10,
(39, 'r'): 10,
(39, 's'): 10,
(39, 't'): 10,
(39, 'u'): 10,
(39, 'v'): 10,
(39, 'w'): 10,
(39, 'x'): 10,
(39, 'y'): 10,
(39, 'z'): 10,
(40, '0'): 10,
(40, '1'): 10,
(40, '2'): 10,
(40, '3'): 10,
(40, '4'): 10,
(40, '5'): 10,
(40, '6'): 10,
(40, '7'): 10,
(40, '8'): 10,
(40, '9'): 10,
(40, 'A'): 10,
(40, 'B'): 10,
(40, 'C'): 10,
(40, 'D'): 10,
(40, 'E'): 10,
(40, 'F'): 10,
(40, 'G'): 10,
(40, 'H'): 10,
(40, 'I'): 10,
(40, 'J'): 10,
(40, 'K'): 10,
(40, 'L'): 10,
(40, 'M'): 10,
(40, 'N'): 10,
(40, 'O'): 10,
(40, 'P'): 10,
(40, 'Q'): 10,
(40, 'R'): 10,
(40, 'S'): 10,
(40, 'T'): 10,
(40, 'U'): 10,
(40, 'V'): 10,
(40, 'W'): 10,
(40, 'X'): 10,
(40, 'Y'): 10,
(40, 'Z'): 10,
(40, '_'): 10,
(40, 'a'): 10,
(40, 'b'): 10,
(40, 'c'): 10,
(40, 'd'): 10,
(40, 'e'): 10,
(40, 'f'): 10,
(40, 'g'): 10,
(40, 'h'): 10,
(40, 'i'): 10,
(40, 'j'): 10,
(40, 'k'): 10,
(40, 'l'): 10,
(40, 'm'): 10,
(40, 'n'): 10,
(40, 'o'): 10,
(40, 'p'): 10,
(40, 'q'): 10,
(40, 'r'): 10,
(40, 's'): 10,
(40, 't'): 10,
(40, 'u'): 10,
(40, 'v'): 10,
(40, 'w'): 10,
(40, 'x'): 10,
(40, 'y'): 10,
(40, 'z'): 10,
(41, '0'): 10,
(41, '1'): 10,
(41, '2'): 10,
(41, '3'): 10,
(41, '4'): 10,
(41, '5'): 10,
(41, '6'): 10,
(41, '7'): 10,
(41, '8'): 10,
(41, '9'): 10,
(41, 'A'): 10,
(41, 'B'): 10,
(41, 'C'): 10,
(41, 'D'): 10,
(41, 'E'): 10,
(41, 'F'): 10,
(41, 'G'): 10,
(41, 'H'): 10,
(41, 'I'): 10,
(41, 'J'): 10,
(41, 'K'): 10,
(41, 'L'): 10,
(41, 'M'): 10,
(41, 'N'): 10,
(41, 'O'): 10,
(41, 'P'): 10,
(41, 'Q'): 10,
(41, 'R'): 10,
(41, 'S'): 10,
(41, 'T'): 10,
(41, 'U'): 10,
(41, 'V'): 10,
(41, 'W'): 10,
(41, 'X'): 10,
(41, 'Y'): 10,
(41, 'Z'): 10,
(41, '_'): 10,
(41, 'a'): 42,
(41, 'b'): 10,
(41, 'c'): 10,
(41, 'd'): 10,
(41, 'e'): 10,
(41, 'f'): 10,
(41, 'g'): 10,
(41, 'h'): 10,
(41, 'i'): 10,
(41, 'j'): 10,
(41, 'k'): 10,
(41, 'l'): 10,
(41, 'm'): 10,
(41, 'n'): 10,
(41, 'o'): 10,
(41, 'p'): 10,
(41, 'q'): 10,
(41, 'r'): 10,
(41, 's'): 10,
(41, 't'): 10,
(41, 'u'): 10,
(41, 'v'): 10,
(41, 'w'): 10,
(41, 'x'): 10,
(41, 'y'): 10,
(41, 'z'): 10,
(42, '0'): 10,
(42, '1'): 10,
(42, '2'): 10,
(42, '3'): 10,
(42, '4'): 10,
(42, '5'): 10,
(42, '6'): 10,
(42, '7'): 10,
(42, '8'): 10,
(42, '9'): 10,
(42, 'A'): 10,
(42, 'B'): 10,
(42, 'C'): 10,
(42, 'D'): 10,
(42, 'E'): 10,
(42, 'F'): 10,
(42, 'G'): 10,
(42, 'H'): 10,
(42, 'I'): 10,
(42, 'J'): 10,
(42, 'K'): 10,
(42, 'L'): 10,
(42, 'M'): 10,
(42, 'N'): 10,
(42, 'O'): 10,
(42, 'P'): 10,
(42, 'Q'): 10,
(42, 'R'): 10,
(42, 'S'): 10,
(42, 'T'): 10,
(42, 'U'): 10,
(42, 'V'): 10,
(42, 'W'): 10,
(42, 'X'): 10,
(42, 'Y'): 10,
(42, 'Z'): 10,
(42, '_'): 43,
(42, 'a'): 10,
(42, 'b'): 10,
(42, 'c'): 10,
(42, 'd'): 10,
(42, 'e'): 10,
(42, 'f'): 10,
(42, 'g'): 10,
(42, 'h'): 10,
(42, 'i'): 10,
(42, 'j'): 10,
(42, 'k'): 10,
(42, 'l'): 10,
(42, 'm'): 10,
(42, 'n'): 10,
(42, 'o'): 10,
(42, 'p'): 10,
(42, 'q'): 10,
(42, 'r'): 10,
(42, 's'): 10,
(42, 't'): 10,
(42, 'u'): 10,
(42, 'v'): 10,
(42, 'w'): 10,
(42, 'x'): 10,
(42, 'y'): 10,
(42, 'z'): 10,
(43, '0'): 10,
(43, '1'): 10,
(43, '2'): 10,
(43, '3'): 10,
(43, '4'): 10,
(43, '5'): 10,
(43, '6'): 10,
(43, '7'): 10,
(43, '8'): 10,
(43, '9'): 10,
(43, 'A'): 10,
(43, 'B'): 10,
(43, 'C'): 10,
(43, 'D'): 10,
(43, 'E'): 10,
(43, 'F'): 10,
(43, 'G'): 10,
(43, 'H'): 10,
(43, 'I'): 10,
(43, 'J'): 10,
(43, 'K'): 10,
(43, 'L'): 10,
(43, 'M'): 10,
(43, 'N'): 10,
(43, 'O'): 10,
(43, 'P'): 10,
(43, 'Q'): 10,
(43, 'R'): 10,
(43, 'S'): 10,
(43, 'T'): 10,
(43, 'U'): 10,
(43, 'V'): 10,
(43, 'W'): 10,
(43, 'X'): 10,
(43, 'Y'): 10,
(43, 'Z'): 10,
(43, '_'): 10,
(43, 'a'): 10,
(43, 'b'): 10,
(43, 'c'): 10,
(43, 'd'): 10,
(43, 'e'): 10,
(43, 'f'): 10,
(43, 'g'): 10,
(43, 'h'): 10,
(43, 'i'): 10,
(43, 'j'): 10,
(43, 'k'): 10,
(43, 'l'): 10,
(43, 'm'): 10,
(43, 'n'): 10,
(43, 'o'): 10,
(43, 'p'): 44,
(43, 'q'): 10,
(43, 'r'): 10,
(43, 's'): 10,
(43, 't'): 10,
(43, 'u'): 10,
(43, 'v'): 10,
(43, 'w'): 10,
(43, 'x'): 10,
(43, 'y'): 10,
(43, 'z'): 10,
(44, '0'): 10,
(44, '1'): 10,
(44, '2'): 10,
(44, '3'): 10,
(44, '4'): 10,
(44, '5'): 10,
(44, '6'): 10,
(44, '7'): 10,
(44, '8'): 10,
(44, '9'): 10,
(44, 'A'): 10,
(44, 'B'): 10,
(44, 'C'): 10,
(44, 'D'): 10,
(44, 'E'): 10,
(44, 'F'): 10,
(44, 'G'): 10,
(44, 'H'): 10,
(44, 'I'): 10,
(44, 'J'): 10,
(44, 'K'): 10,
(44, 'L'): 10,
(44, 'M'): 10,
(44, 'N'): 10,
(44, 'O'): 10,
(44, 'P'): 10,
(44, 'Q'): 10,
(44, 'R'): 10,
(44, 'S'): 10,
(44, 'T'): 10,
(44, 'U'): 10,
(44, 'V'): 10,
(44, 'W'): 10,
(44, 'X'): 10,
(44, 'Y'): 10,
(44, 'Z'): 10,
(44, '_'): 10,
(44, 'a'): 10,
(44, 'b'): 10,
(44, 'c'): 10,
(44, 'd'): 10,
(44, 'e'): 10,
(44, 'f'): 10,
(44, 'g'): 10,
(44, 'h'): 10,
(44, 'i'): 10,
(44, 'j'): 10,
(44, 'k'): 10,
(44, 'l'): 10,
(44, 'm'): 10,
(44, 'n'): 10,
(44, 'o'): 10,
(44, 'p'): 10,
(44, 'q'): 10,
(44, 'r'): 45,
(44, 's'): 10,
(44, 't'): 10,
(44, 'u'): 10,
(44, 'v'): 10,
(44, 'w'): 10,
(44, 'x'): 10,
(44, 'y'): 10,
(44, 'z'): 10,
(45, '0'): 10,
(45, '1'): 10,
(45, '2'): 10,
(45, '3'): 10,
(45, '4'): 10,
(45, '5'): 10,
(45, '6'): 10,
(45, '7'): 10,
(45, '8'): 10,
(45, '9'): 10,
(45, 'A'): 10,
(45, 'B'): 10,
(45, 'C'): 10,
(45, 'D'): 10,
(45, 'E'): 10,
(45, 'F'): 10,
(45, 'G'): 10,
(45, 'H'): 10,
(45, 'I'): 10,
(45, 'J'): 10,
(45, 'K'): 10,
(45, 'L'): 10,
(45, 'M'): 10,
(45, 'N'): 10,
(45, 'O'): 10,
(45, 'P'): 10,
(45, 'Q'): 10,
(45, 'R'): 10,
(45, 'S'): 10,
(45, 'T'): 10,
(45, 'U'): 10,
(45, 'V'): 10,
(45, 'W'): 10,
(45, 'X'): 10,
(45, 'Y'): 10,
(45, 'Z'): 10,
(45, '_'): 10,
(45, 'a'): 10,
(45, 'b'): 10,
(45, 'c'): 10,
(45, 'd'): 10,
(45, 'e'): 46,
(45, 'f'): 10,
(45, 'g'): 10,
(45, 'h'): 10,
(45, 'i'): 10,
(45, 'j'): 10,
(45, 'k'): 10,
(45, 'l'): 10,
(45, 'm'): 10,
(45, 'n'): 10,
(45, 'o'): 10,
(45, 'p'): 10,
(45, 'q'): 10,
(45, 'r'): 10,
(45, 's'): 10,
(45, 't'): 10,
(45, 'u'): 10,
(45, 'v'): 10,
(45, 'w'): 10,
(45, 'x'): 10,
(45, 'y'): 10,
(45, 'z'): 10,
(46, '0'): 10,
(46, '1'): 10,
(46, '2'): 10,
(46, '3'): 10,
(46, '4'): 10,
(46, '5'): 10,
(46, '6'): 10,
(46, '7'): 10,
(46, '8'): 10,
(46, '9'): 10,
(46, 'A'): 10,
(46, 'B'): 10,
(46, 'C'): 10,
(46, 'D'): 10,
(46, 'E'): 10,
(46, 'F'): 10,
(46, 'G'): 10,
(46, 'H'): 10,
(46, 'I'): 10,
(46, 'J'): 10,
(46, 'K'): 10,
(46, 'L'): 10,
(46, 'M'): 10,
(46, 'N'): 10,
(46, 'O'): 10,
(46, 'P'): 10,
(46, 'Q'): 10,
(46, 'R'): 10,
(46, 'S'): 10,
(46, 'T'): 10,
(46, 'U'): 10,
(46, 'V'): 10,
(46, 'W'): 10,
(46, 'X'): 10,
(46, 'Y'): 10,
(46, 'Z'): 10,
(46, '_'): 10,
(46, 'a'): 10,
(46, 'b'): 10,
(46, 'c'): 10,
(46, 'd'): 47,
(46, 'e'): 10,
(46, 'f'): 10,
(46, 'g'): 10,
(46, 'h'): 10,
(46, 'i'): 10,
(46, 'j'): 10,
(46, 'k'): 10,
(46, 'l'): 10,
(46, 'm'): 10,
(46, 'n'): 10,
(46, 'o'): 10,
(46, 'p'): 10,
(46, 'q'): 10,
(46, 'r'): 10,
(46, 's'): 10,
(46, 't'): 10,
(46, 'u'): 10,
(46, 'v'): 10,
(46, 'w'): 10,
(46, 'x'): 10,
(46, 'y'): 10,
(46, 'z'): 10,
(47, '0'): 10,
(47, '1'): 10,
(47, '2'): 10,
(47, '3'): 10,
(47, '4'): 10,
(47, '5'): 10,
(47, '6'): 10,
(47, '7'): 10,
(47, '8'): 10,
(47, '9'): 10,
(47, 'A'): 10,
(47, 'B'): 10,
(47, 'C'): 10,
(47, 'D'): 10,
(47, 'E'): 10,
(47, 'F'): 10,
(47, 'G'): 10,
(47, 'H'): 10,
(47, 'I'): 10,
(47, 'J'): 10,
(47, 'K'): 10,
(47, 'L'): 10,
(47, 'M'): 10,
(47, 'N'): 10,
(47, 'O'): 10,
(47, 'P'): 10,
(47, 'Q'): 10,
(47, 'R'): 10,
(47, 'S'): 10,
(47, 'T'): 10,
(47, 'U'): 10,
(47, 'V'): 10,
(47, 'W'): 10,
(47, 'X'): 10,
(47, 'Y'): 10,
(47, 'Z'): 10,
(47, '_'): 10,
(47, 'a'): 10,
(47, 'b'): 10,
(47, 'c'): 10,
(47, 'd'): 10,
(47, 'e'): 10,
(47, 'f'): 10,
(47, 'g'): 10,
(47, 'h'): 10,
(47, 'i'): 48,
(47, 'j'): 10,
(47, 'k'): 10,
(47, 'l'): 10,
(47, 'm'): 10,
(47, 'n'): 10,
(47, 'o'): 10,
(47, 'p'): 10,
(47, 'q'): 10,
(47, 'r'): 10,
(47, 's'): 10,
(47, 't'): 10,
(47, 'u'): 10,
(47, 'v'): 10,
(47, 'w'): 10,
(47, 'x'): 10,
(47, 'y'): 10,
(47, 'z'): 10,
(48, '0'): 10,
(48, '1'): 10,
(48, '2'): 10,
(48, '3'): 10,
(48, '4'): 10,
(48, '5'): 10,
(48, '6'): 10,
(48, '7'): 10,
(48, '8'): 10,
(48, '9'): 10,
(48, 'A'): 10,
(48, 'B'): 10,
(48, 'C'): 10,
(48, 'D'): 10,
(48, 'E'): 10,
(48, 'F'): 10,
(48, 'G'): 10,
(48, 'H'): 10,
(48, 'I'): 10,
(48, 'J'): 10,
(48, 'K'): 10,
(48, 'L'): 10,
(48, 'M'): 10,
(48, 'N'): 10,
(48, 'O'): 10,
(48, 'P'): 10,
(48, 'Q'): 10,
(48, 'R'): 10,
(48, 'S'): 10,
(48, 'T'): 10,
(48, 'U'): 10,
(48, 'V'): 10,
(48, 'W'): 10,
(48, 'X'): 10,
(48, 'Y'): 10,
(48, 'Z'): 10,
(48, '_'): 10,
(48, 'a'): 10,
(48, 'b'): 10,
(48, 'c'): 49,
(48, 'd'): 10,
(48, 'e'): 10,
(48, 'f'): 10,
(48, 'g'): 10,
(48, 'h'): 10,
(48, 'i'): 10,
(48, 'j'): 10,
(48, 'k'): 10,
(48, 'l'): 10,
(48, 'm'): 10,
(48, 'n'): 10,
(48, 'o'): 10,
(48, 'p'): 10,
(48, 'q'): 10,
(48, 'r'): 10,
(48, 's'): 10,
(48, 't'): 10,
(48, 'u'): 10,
(48, 'v'): 10,
(48, 'w'): 10,
(48, 'x'): 10,
(48, 'y'): 10,
(48, 'z'): 10,
(49, '0'): 10,
(49, '1'): 10,
(49, '2'): 10,
(49, '3'): 10,
(49, '4'): 10,
(49, '5'): 10,
(49, '6'): 10,
(49, '7'): 10,
(49, '8'): 10,
(49, '9'): 10,
(49, 'A'): 10,
(49, 'B'): 10,
(49, 'C'): 10,
(49, 'D'): 10,
(49, 'E'): 10,
(49, 'F'): 10,
(49, 'G'): 10,
(49, 'H'): 10,
(49, 'I'): 10,
(49, 'J'): 10,
(49, 'K'): 10,
(49, 'L'): 10,
(49, 'M'): 10,
(49, 'N'): 10,
(49, 'O'): 10,
(49, 'P'): 10,
(49, 'Q'): 10,
(49, 'R'): 10,
(49, 'S'): 10,
(49, 'T'): 10,
(49, 'U'): 10,
(49, 'V'): 10,
(49, 'W'): 10,
(49, 'X'): 10,
(49, 'Y'): 10,
(49, 'Z'): 10,
(49, '_'): 10,
(49, 'a'): 50,
(49, 'b'): 10,
(49, 'c'): 10,
(49, 'd'): 10,
(49, 'e'): 10,
(49, 'f'): 10,
(49, 'g'): 10,
(49, 'h'): 10,
(49, 'i'): 10,
(49, 'j'): 10,
(49, 'k'): 10,
(49, 'l'): 10,
(49, 'm'): 10,
(49, 'n'): 10,
(49, 'o'): 10,
(49, 'p'): 10,
(49, 'q'): 10,
(49, 'r'): 10,
(49, 's'): 10,
(49, 't'): 10,
(49, 'u'): 10,
(49, 'v'): 10,
(49, 'w'): 10,
(49, 'x'): 10,
(49, 'y'): 10,
(49, 'z'): 10,
(50, '0'): 10,
(50, '1'): 10,
(50, '2'): 10,
(50, '3'): 10,
(50, '4'): 10,
(50, '5'): 10,
(50, '6'): 10,
(50, '7'): 10,
(50, '8'): 10,
(50, '9'): 10,
(50, 'A'): 10,
(50, 'B'): 10,
(50, 'C'): 10,
(50, 'D'): 10,
(50, 'E'): 10,
(50, 'F'): 10,
(50, 'G'): 10,
(50, 'H'): 10,
(50, 'I'): 10,
(50, 'J'): 10,
(50, 'K'): 10,
(50, 'L'): 10,
(50, 'M'): 10,
(50, 'N'): 10,
(50, 'O'): 10,
(50, 'P'): 10,
(50, 'Q'): 10,
(50, 'R'): 10,
(50, 'S'): 10,
(50, 'T'): 10,
(50, 'U'): 10,
(50, 'V'): 10,
(50, 'W'): 10,
(50, 'X'): 10,
(50, 'Y'): 10,
(50, 'Z'): 10,
(50, '_'): 10,
(50, 'a'): 10,
(50, 'b'): 10,
(50, 'c'): 10,
(50, 'd'): 10,
(50, 'e'): 10,
(50, 'f'): 10,
(50, 'g'): 10,
(50, 'h'): 10,
(50, 'i'): 10,
(50, 'j'): 10,
(50, 'k'): 10,
(50, 'l'): 10,
(50, 'm'): 10,
(50, 'n'): 10,
(50, 'o'): 10,
(50, 'p'): 10,
(50, 'q'): 10,
(50, 'r'): 10,
(50, 's'): 10,
(50, 't'): 51,
(50, 'u'): 10,
(50, 'v'): 10,
(50, 'w'): 10,
(50, 'x'): 10,
(50, 'y'): 10,
(50, 'z'): 10,
(51, '0'): 10,
(51, '1'): 10,
(51, '2'): 10,
(51, '3'): 10,
(51, '4'): 10,
(51, '5'): 10,
(51, '6'): 10,
(51, '7'): 10,
(51, '8'): 10,
(51, '9'): 10,
(51, 'A'): 10,
(51, 'B'): 10,
(51, 'C'): 10,
(51, 'D'): 10,
(51, 'E'): 10,
(51, 'F'): 10,
(51, 'G'): 10,
(51, 'H'): 10,
(51, 'I'): 10,
(51, 'J'): 10,
(51, 'K'): 10,
(51, 'L'): 10,
(51, 'M'): 10,
(51, 'N'): 10,
(51, 'O'): 10,
(51, 'P'): 10,
(51, 'Q'): 10,
(51, 'R'): 10,
(51, 'S'): 10,
(51, 'T'): 10,
(51, 'U'): 10,
(51, 'V'): 10,
(51, 'W'): 10,
(51, 'X'): 10,
(51, 'Y'): 10,
(51, 'Z'): 10,
(51, '_'): 10,
(51, 'a'): 10,
(51, 'b'): 10,
(51, 'c'): 10,
(51, 'd'): 10,
(51, 'e'): 52,
(51, 'f'): 10,
(51, 'g'): 10,
(51, 'h'): 10,
(51, 'i'): 10,
(51, 'j'): 10,
(51, 'k'): 10,
(51, 'l'): 10,
(51, 'm'): 10,
(51, 'n'): 10,
(51, 'o'): 10,
(51, 'p'): 10,
(51, 'q'): 10,
(51, 'r'): 10,
(51, 's'): 10,
(51, 't'): 10,
(51, 'u'): 10,
(51, 'v'): 10,
(51, 'w'): 10,
(51, 'x'): 10,
(51, 'y'): 10,
(51, 'z'): 10,
(52, '0'): 10,
(52, '1'): 10,
(52, '2'): 10,
(52, '3'): 10,
(52, '4'): 10,
(52, '5'): 10,
(52, '6'): 10,
(52, '7'): 10,
(52, '8'): 10,
(52, '9'): 10,
(52, 'A'): 10,
(52, 'B'): 10,
(52, 'C'): 10,
(52, 'D'): 10,
(52, 'E'): 10,
(52, 'F'): 10,
(52, 'G'): 10,
(52, 'H'): 10,
(52, 'I'): 10,
(52, 'J'): 10,
(52, 'K'): 10,
(52, 'L'): 10,
(52, 'M'): 10,
(52, 'N'): 10,
(52, 'O'): 10,
(52, 'P'): 10,
(52, 'Q'): 10,
(52, 'R'): 10,
(52, 'S'): 10,
(52, 'T'): 10,
(52, 'U'): 10,
(52, 'V'): 10,
(52, 'W'): 10,
(52, 'X'): 10,
(52, 'Y'): 10,
(52, 'Z'): 10,
(52, '_'): 10,
(52, 'a'): 10,
(52, 'b'): 10,
(52, 'c'): 10,
(52, 'd'): 10,
(52, 'e'): 10,
(52, 'f'): 10,
(52, 'g'): 10,
(52, 'h'): 10,
(52, 'i'): 10,
(52, 'j'): 10,
(52, 'k'): 10,
(52, 'l'): 10,
(52, 'm'): 10,
(52, 'n'): 10,
(52, 'o'): 10,
(52, 'p'): 10,
(52, 'q'): 10,
(52, 'r'): 10,
(52, 's'): 10,
(52, 't'): 10,
(52, 'u'): 10,
(52, 'v'): 10,
(52, 'w'): 10,
(52, 'x'): 10,
(52, 'y'): 10,
(52, 'z'): 10,
(53, '0'): 10,
(53, '1'): 10,
(53, '2'): 10,
(53, '3'): 10,
(53, '4'): 10,
(53, '5'): 10,
(53, '6'): 10,
(53, '7'): 10,
(53, '8'): 10,
(53, '9'): 10,
(53, 'A'): 10,
(53, 'B'): 10,
(53, 'C'): 10,
(53, 'D'): 10,
(53, 'E'): 10,
(53, 'F'): 10,
(53, 'G'): 10,
(53, 'H'): 10,
(53, 'I'): 10,
(53, 'J'): 10,
(53, 'K'): 10,
(53, 'L'): 10,
(53, 'M'): 10,
(53, 'N'): 10,
(53, 'O'): 10,
(53, 'P'): 10,
(53, 'Q'): 10,
(53, 'R'): 10,
(53, 'S'): 10,
(53, 'T'): 10,
(53, 'U'): 10,
(53, 'V'): 10,
(53, 'W'): 10,
(53, 'X'): 10,
(53, 'Y'): 10,
(53, 'Z'): 10,
(53, '_'): 10,
(53, 'a'): 10,
(53, 'b'): 10,
(53, 'c'): 10,
(53, 'd'): 10,
(53, 'e'): 10,
(53, 'f'): 10,
(53, 'g'): 10,
(53, 'h'): 10,
(53, 'i'): 10,
(53, 'j'): 10,
(53, 'k'): 10,
(53, 'l'): 10,
(53, 'm'): 10,
(53, 'n'): 10,
(53, 'o'): 10,
(53, 'p'): 10,
(53, 'q'): 10,
(53, 'r'): 10,
(53, 's'): 10,
(53, 't'): 10,
(53, 'u'): 10,
(53, 'v'): 10,
(53, 'w'): 10,
(53, 'x'): 10,
(53, 'y'): 10,
(53, 'z'): 10,
(54, '='): 63,
(56, '.'): 62,
(57, '='): 61,
(59, '='): 60,
(64, '>'): 66,
(67, '0'): 10,
(67, '1'): 10,
(67, '2'): 10,
(67, '3'): 10,
(67, '4'): 10,
(67, '5'): 10,
(67, '6'): 10,
(67, '7'): 10,
(67, '8'): 10,
(67, '9'): 10,
(67, 'A'): 10,
(67, 'B'): 10,
(67, 'C'): 10,
(67, 'D'): 10,
(67, 'E'): 10,
(67, 'F'): 10,
(67, 'G'): 10,
(67, 'H'): 10,
(67, 'I'): 10,
(67, 'J'): 10,
(67, 'K'): 10,
(67, 'L'): 10,
(67, 'M'): 10,
(67, 'N'): 10,
(67, 'O'): 10,
(67, 'P'): 10,
(67, 'Q'): 10,
(67, 'R'): 10,
(67, 'S'): 10,
(67, 'T'): 10,
(67, 'U'): 10,
(67, 'V'): 10,
(67, 'W'): 10,
(67, 'X'): 10,
(67, 'Y'): 10,
(67, 'Z'): 10,
(67, '_'): 10,
(67, 'a'): 10,
(67, 'b'): 10,
(67, 'c'): 10,
(67, 'd'): 10,
(67, 'e'): 10,
(67, 'f'): 10,
(67, 'g'): 10,
(67, 'h'): 10,
(67, 'i'): 10,
(67, 'j'): 10,
(67, 'k'): 10,
(67, 'l'): 10,
(67, 'm'): 68,
(67, 'n'): 10,
(67, 'o'): 10,
(67, 'p'): 10,
(67, 'q'): 10,
(67, 'r'): 10,
(67, 's'): 10,
(67, 't'): 10,
(67, 'u'): 10,
(67, 'v'): 10,
(67, 'w'): 10,
(67, 'x'): 10,
(67, 'y'): 10,
(67, 'z'): 10,
(68, '0'): 10,
(68, '1'): 10,
(68, '2'): 10,
(68, '3'): 10,
(68, '4'): 10,
(68, '5'): 10,
(68, '6'): 10,
(68, '7'): 10,
(68, '8'): 10,
(68, '9'): 10,
(68, 'A'): 10,
(68, 'B'): 10,
(68, 'C'): 10,
(68, 'D'): 10,
(68, 'E'): 10,
(68, 'F'): 10,
(68, 'G'): 10,
(68, 'H'): 10,
(68, 'I'): 10,
(68, 'J'): 10,
(68, 'K'): 10,
(68, 'L'): 10,
(68, 'M'): 10,
(68, 'N'): 10,
(68, 'O'): 10,
(68, 'P'): 10,
(68, 'Q'): 10,
(68, 'R'): 10,
(68, 'S'): 10,
(68, 'T'): 10,
(68, 'U'): 10,
(68, 'V'): 10,
(68, 'W'): 10,
(68, 'X'): 10,
(68, 'Y'): 10,
(68, 'Z'): 10,
(68, '_'): 10,
(68, 'a'): 10,
(68, 'b'): 10,
(68, 'c'): 10,
(68, 'd'): 10,
(68, 'e'): 10,
(68, 'f'): 10,
(68, 'g'): 10,
(68, 'h'): 10,
(68, 'i'): 10,
(68, 'j'): 10,
(68, 'k'): 10,
(68, 'l'): 10,
(68, 'm'): 10,
(68, 'n'): 10,
(68, 'o'): 10,
(68, 'p'): 10,
(68, 'q'): 10,
(68, 'r'): 10,
(68, 's'): 10,
(68, 't'): 10,
(68, 'u'): 10,
(68, 'v'): 10,
(68, 'w'): 10,
(68, 'x'): 10,
(68, 'y'): 10,
(68, 'z'): 10,
(69, '0'): 10,
(69, '1'): 10,
(69, '2'): 10,
(69, '3'): 10,
(69, '4'): 10,
(69, '5'): 10,
(69, '6'): 10,
(69, '7'): 10,
(69, '8'): 10,
(69, '9'): 10,
(69, 'A'): 10,
(69, 'B'): 10,
(69, 'C'): 10,
(69, 'D'): 10,
(69, 'E'): 10,
(69, 'F'): 10,
(69, 'G'): 10,
(69, 'H'): 10,
(69, 'I'): 10,
(69, 'J'): 10,
(69, 'K'): 10,
(69, 'L'): 10,
(69, 'M'): 10,
(69, 'N'): 10,
(69, 'O'): 10,
(69, 'P'): 10,
(69, 'Q'): 10,
(69, 'R'): 10,
(69, 'S'): 10,
(69, 'T'): 10,
(69, 'U'): 10,
(69, 'V'): 10,
(69, 'W'): 10,
(69, 'X'): 10,
(69, 'Y'): 10,
(69, 'Z'): 10,
(69, '_'): 10,
(69, 'a'): 10,
(69, 'b'): 10,
(69, 'c'): 10,
(69, 'd'): 10,
(69, 'e'): 10,
(69, 'f'): 10,
(69, 'g'): 10,
(69, 'h'): 10,
(69, 'i'): 10,
(69, 'j'): 10,
(69, 'k'): 10,
(69, 'l'): 10,
(69, 'm'): 10,
(69, 'n'): 10,
(69, 'o'): 70,
(69, 'p'): 10,
(69, 'q'): 10,
(69, 'r'): 10,
(69, 's'): 10,
(69, 't'): 10,
(69, 'u'): 10,
(69, 'v'): 10,
(69, 'w'): 10,
(69, 'x'): 10,
(69, 'y'): 10,
(69, 'z'): 10,
(70, '0'): 10,
(70, '1'): 10,
(70, '2'): 10,
(70, '3'): 10,
(70, '4'): 10,
(70, '5'): 10,
(70, '6'): 10,
(70, '7'): 10,
(70, '8'): 10,
(70, '9'): 10,
(70, 'A'): 10,
(70, 'B'): 10,
(70, 'C'): 10,
(70, 'D'): 10,
(70, 'E'): 10,
(70, 'F'): 10,
(70, 'G'): 10,
(70, 'H'): 10,
(70, 'I'): 10,
(70, 'J'): 10,
(70, 'K'): 10,
(70, 'L'): 10,
(70, 'M'): 10,
(70, 'N'): 10,
(70, 'O'): 10,
(70, 'P'): 10,
(70, 'Q'): 10,
(70, 'R'): 10,
(70, 'S'): 10,
(70, 'T'): 10,
(70, 'U'): 10,
(70, 'V'): 10,
(70, 'W'): 10,
(70, 'X'): 10,
(70, 'Y'): 10,
(70, 'Z'): 10,
(70, '_'): 10,
(70, 'a'): 10,
(70, 'b'): 10,
(70, 'c'): 71,
(70, 'd'): 10,
(70, 'e'): 10,
(70, 'f'): 10,
(70, 'g'): 10,
(70, 'h'): 10,
(70, 'i'): 10,
(70, 'j'): 10,
(70, 'k'): 10,
(70, 'l'): 10,
(70, 'm'): 10,
(70, 'n'): 10,
(70, 'o'): 10,
(70, 'p'): 10,
(70, 'q'): 10,
(70, 'r'): 10,
(70, 's'): 10,
(70, 't'): 10,
(70, 'u'): 10,
(70, 'v'): 10,
(70, 'w'): 10,
(70, 'x'): 10,
(70, 'y'): 10,
(70, 'z'): 10,
(71, '0'): 10,
(71, '1'): 10,
(71, '2'): 10,
(71, '3'): 10,
(71, '4'): 10,
(71, '5'): 10,
(71, '6'): 10,
(71, '7'): 10,
(71, '8'): 10,
(71, '9'): 10,
(71, 'A'): 10,
(71, 'B'): 10,
(71, 'C'): 10,
(71, 'D'): 10,
(71, 'E'): 10,
(71, 'F'): 10,
(71, 'G'): 10,
(71, 'H'): 10,
(71, 'I'): 10,
(71, 'J'): 10,
(71, 'K'): 10,
(71, 'L'): 10,
(71, 'M'): 10,
(71, 'N'): 10,
(71, 'O'): 10,
(71, 'P'): 10,
(71, 'Q'): 10,
(71, 'R'): 10,
(71, 'S'): 10,
(71, 'T'): 10,
(71, 'U'): 10,
(71, 'V'): 10,
(71, 'W'): 10,
(71, 'X'): 10,
(71, 'Y'): 10,
(71, 'Z'): 10,
(71, '_'): 10,
(71, 'a'): 10,
(71, 'b'): 10,
(71, 'c'): 10,
(71, 'd'): 10,
(71, 'e'): 10,
(71, 'f'): 10,
(71, 'g'): 10,
(71, 'h'): 10,
(71, 'i'): 10,
(71, 'j'): 10,
(71, 'k'): 72,
(71, 'l'): 10,
(71, 'm'): 10,
(71, 'n'): 10,
(71, 'o'): 10,
(71, 'p'): 10,
(71, 'q'): 10,
(71, 'r'): 10,
(71, 's'): 10,
(71, 't'): 10,
(71, 'u'): 10,
(71, 'v'): 10,
(71, 'w'): 10,
(71, 'x'): 10,
(71, 'y'): 10,
(71, 'z'): 10,
(72, '0'): 10,
(72, '1'): 10,
(72, '2'): 10,
(72, '3'): 10,
(72, '4'): 10,
(72, '5'): 10,
(72, '6'): 10,
(72, '7'): 10,
(72, '8'): 10,
(72, '9'): 10,
(72, 'A'): 10,
(72, 'B'): 10,
(72, 'C'): 10,
(72, 'D'): 10,
(72, 'E'): 10,
(72, 'F'): 10,
(72, 'G'): 10,
(72, 'H'): 10,
(72, 'I'): 10,
(72, 'J'): 10,
(72, 'K'): 10,
(72, 'L'): 10,
(72, 'M'): 10,
(72, 'N'): 10,
(72, 'O'): 10,
(72, 'P'): 10,
(72, 'Q'): 10,
(72, 'R'): 10,
(72, 'S'): 10,
(72, 'T'): 10,
(72, 'U'): 10,
(72, 'V'): 10,
(72, 'W'): 10,
(72, 'X'): 10,
(72, 'Y'): 10,
(72, 'Z'): 10,
(72, '_'): 10,
(72, 'a'): 10,
(72, 'b'): 10,
(72, 'c'): 10,
(72, 'd'): 10,
(72, 'e'): 10,
(72, 'f'): 10,
(72, 'g'): 10,
(72, 'h'): 10,
(72, 'i'): 10,
(72, 'j'): 10,
(72, 'k'): 10,
(72, 'l'): 10,
(72, 'm'): 10,
(72, 'n'): 10,
(72, 'o'): 10,
(72, 'p'): 10,
(72, 'q'): 10,
(72, 'r'): 10,
(72, 's'): 10,
(72, 't'): 10,
(72, 'u'): 10,
(72, 'v'): 10,
(72, 'w'): 10,
(72, 'x'): 10,
(72, 'y'): 10,
(72, 'z'): 10,
(80, '\x00'): 80,
(80, '\x01'): 80,
(80, '\x02'): 80,
(80, '\x03'): 80,
(80, '\x04'): 80,
(80, '\x05'): 80,
(80, '\x06'): 80,
(80, '\x07'): 80,
(80, '\x08'): 80,
(80, '\t'): 80,
(80, '\n'): 80,
(80, '\x0b'): 80,
(80, '\x0c'): 80,
(80, '\r'): 80,
(80, '\x0e'): 80,
(80, '\x0f'): 80,
(80, '\x10'): 80,
(80, '\x11'): 80,
(80, '\x12'): 80,
(80, '\x13'): 80,
(80, '\x14'): 80,
(80, '\x15'): 80,
(80, '\x16'): 80,
(80, '\x17'): 80,
(80, '\x18'): 80,
(80, '\x19'): 80,
(80, '\x1a'): 80,
(80, '\x1b'): 80,
(80, '\x1c'): 80,
(80, '\x1d'): 80,
(80, '\x1e'): 80,
(80, '\x1f'): 80,
(80, ' '): 80,
(80, '!'): 80,
(80, '"'): 80,
(80, '#'): 80,
(80, '$'): 80,
(80, '%'): 80,
(80, '&'): 80,
(80, "'"): 80,
(80, '('): 80,
(80, ')'): 80,
(80, '*'): 83,
(80, '+'): 80,
(80, ','): 80,
(80, '-'): 80,
(80, '.'): 80,
(80, '/'): 80,
(80, '0'): 80,
(80, '1'): 80,
(80, '2'): 80,
(80, '3'): 80,
(80, '4'): 80,
(80, '5'): 80,
(80, '6'): 80,
(80, '7'): 80,
(80, '8'): 80,
(80, '9'): 80,
(80, ':'): 80,
(80, ';'): 80,
(80, '<'): 80,
(80, '='): 80,
(80, '>'): 80,
(80, '?'): 80,
(80, '@'): 80,
(80, 'A'): 80,
(80, 'B'): 80,
(80, 'C'): 80,
(80, 'D'): 80,
(80, 'E'): 80,
(80, 'F'): 80,
(80, 'G'): 80,
(80, 'H'): 80,
(80, 'I'): 80,
(80, 'J'): 80,
(80, 'K'): 80,
(80, 'L'): 80,
(80, 'M'): 80,
(80, 'N'): 80,
(80, 'O'): 80,
(80, 'P'): 80,
(80, 'Q'): 80,
(80, 'R'): 80,
(80, 'S'): 80,
(80, 'T'): 80,
(80, 'U'): 80,
(80, 'V'): 80,
(80, 'W'): 80,
(80, 'X'): 80,
(80, 'Y'): 80,
(80, 'Z'): 80,
(80, '['): 80,
(80, '\\'): 80,
(80, ']'): 80,
(80, '^'): 80,
(80, '_'): 80,
(80, '`'): 80,
(80, 'a'): 80,
(80, 'b'): 80,
(80, 'c'): 80,
(80, 'd'): 80,
(80, 'e'): 80,
(80, 'f'): 80,
(80, 'g'): 80,
(80, 'h'): 80,
(80, 'i'): 80,
(80, 'j'): 80,
(80, 'k'): 80,
(80, 'l'): 80,
(80, 'm'): 80,
(80, 'n'): 80,
(80, 'o'): 80,
(80, 'p'): 80,
(80, 'q'): 80,
(80, 'r'): 80,
(80, 's'): 80,
(80, 't'): 80,
(80, 'u'): 80,
(80, 'v'): 80,
(80, 'w'): 80,
(80, 'x'): 80,
(80, 'y'): 80,
(80, 'z'): 80,
(80, '{'): 80,
(80, '|'): 80,
(80, '}'): 80,
(80, '~'): 80,
(80, '\x7f'): 80,
(80, '\x80'): 80,
(80, '\x81'): 80,
(80, '\x82'): 80,
(80, '\x83'): 80,
(80, '\x84'): 80,
(80, '\x85'): 80,
(80, '\x86'): 80,
(80, '\x87'): 80,
(80, '\x88'): 80,
(80, '\x89'): 80,
(80, '\x8a'): 80,
(80, '\x8b'): 80,
(80, '\x8c'): 80,
(80, '\x8d'): 80,
(80, '\x8e'): 80,
(80, '\x8f'): 80,
(80, '\x90'): 80,
(80, '\x91'): 80,
(80, '\x92'): 80,
(80, '\x93'): 80,
(80, '\x94'): 80,
(80, '\x95'): 80,
(80, '\x96'): 80,
(80, '\x97'): 80,
(80, '\x98'): 80,
(80, '\x99'): 80,
(80, '\x9a'): 80,
(80, '\x9b'): 80,
(80, '\x9c'): 80,
(80, '\x9d'): 80,
(80, '\x9e'): 80,
(80, '\x9f'): 80,
(80, '\xa0'): 80,
(80, '\xa1'): 80,
(80, '\xa2'): 80,
(80, '\xa3'): 80,
(80, '\xa4'): 80,
(80, '\xa5'): 80,
(80, '\xa6'): 80,
(80, '\xa7'): 80,
(80, '\xa8'): 80,
(80, '\xa9'): 80,
(80, '\xaa'): 80,
(80, '\xab'): 80,
(80, '\xac'): 80,
(80, '\xad'): 80,
(80, '\xae'): 80,
(80, '\xaf'): 80,
(80, '\xb0'): 80,
(80, '\xb1'): 80,
(80, '\xb2'): 80,
(80, '\xb3'): 80,
(80, '\xb4'): 80,
(80, '\xb5'): 80,
(80, '\xb6'): 80,
(80, '\xb7'): 80,
(80, '\xb8'): 80,
(80, '\xb9'): 80,
(80, '\xba'): 80,
(80, '\xbb'): 80,
(80, '\xbc'): 80,
(80, '\xbd'): 80,
(80, '\xbe'): 80,
(80, '\xbf'): 80,
(80, '\xc0'): 80,
(80, '\xc1'): 80,
(80, '\xc2'): 80,
(80, '\xc3'): 80,
(80, '\xc4'): 80,
(80, '\xc5'): 80,
(80, '\xc6'): 80,
(80, '\xc7'): 80,
(80, '\xc8'): 80,
(80, '\xc9'): 80,
(80, '\xca'): 80,
(80, '\xcb'): 80,
(80, '\xcc'): 80,
(80, '\xcd'): 80,
(80, '\xce'): 80,
(80, '\xcf'): 80,
(80, '\xd0'): 80,
(80, '\xd1'): 80,
(80, '\xd2'): 80,
(80, '\xd3'): 80,
(80, '\xd4'): 80,
(80, '\xd5'): 80,
(80, '\xd6'): 80,
(80, '\xd7'): 80,
(80, '\xd8'): 80,
(80, '\xd9'): 80,
(80, '\xda'): 80,
(80, '\xdb'): 80,
(80, '\xdc'): 80,
(80, '\xdd'): 80,
(80, '\xde'): 80,
(80, '\xdf'): 80,
(80, '\xe0'): 80,
(80, '\xe1'): 80,
(80, '\xe2'): 80,
(80, '\xe3'): 80,
(80, '\xe4'): 80,
(80, '\xe5'): 80,
(80, '\xe6'): 80,
(80, '\xe7'): 80,
(80, '\xe8'): 80,
(80, '\xe9'): 80,
(80, '\xea'): 80,
(80, '\xeb'): 80,
(80, '\xec'): 80,
(80, '\xed'): 80,
(80, '\xee'): 80,
(80, '\xef'): 80,
(80, '\xf0'): 80,
(80, '\xf1'): 80,
(80, '\xf2'): 80,
(80, '\xf3'): 80,
(80, '\xf4'): 80,
(80, '\xf5'): 80,
(80, '\xf6'): 80,
(80, '\xf7'): 80,
(80, '\xf8'): 80,
(80, '\xf9'): 80,
(80, '\xfa'): 80,
(80, '\xfb'): 80,
(80, '\xfc'): 80,
(80, '\xfd'): 80,
(80, '\xfe'): 80,
(80, '\xff'): 80,
(83, '\x00'): 80,
(83, '\x01'): 80,
(83, '\x02'): 80,
(83, '\x03'): 80,
(83, '\x04'): 80,
(83, '\x05'): 80,
(83, '\x06'): 80,
(83, '\x07'): 80,
(83, '\x08'): 80,
(83, '\t'): 80,
(83, '\n'): 80,
(83, '\x0b'): 80,
(83, '\x0c'): 80,
(83, '\r'): 80,
(83, '\x0e'): 80,
(83, '\x0f'): 80,
(83, '\x10'): 80,
(83, '\x11'): 80,
(83, '\x12'): 80,
(83, '\x13'): 80,
(83, '\x14'): 80,
(83, '\x15'): 80,
(83, '\x16'): 80,
(83, '\x17'): 80,
(83, '\x18'): 80,
(83, '\x19'): 80,
(83, '\x1a'): 80,
(83, '\x1b'): 80,
(83, '\x1c'): 80,
(83, '\x1d'): 80,
(83, '\x1e'): 80,
(83, '\x1f'): 80,
(83, ' '): 80,
(83, '!'): 80,
(83, '"'): 80,
(83, '#'): 80,
(83, '$'): 80,
(83, '%'): 80,
(83, '&'): 80,
(83, "'"): 80,
(83, '('): 80,
(83, ')'): 80,
(83, '*'): 84,
(83, '+'): 80,
(83, ','): 80,
(83, '-'): 80,
(83, '.'): 80,
(83, '/'): 1,
(83, '0'): 80,
(83, '1'): 80,
(83, '2'): 80,
(83, '3'): 80,
(83, '4'): 80,
(83, '5'): 80,
(83, '6'): 80,
(83, '7'): 80,
(83, '8'): 80,
(83, '9'): 80,
(83, ':'): 80,
(83, ';'): 80,
(83, '<'): 80,
(83, '='): 80,
(83, '>'): 80,
(83, '?'): 80,
(83, '@'): 80,
(83, 'A'): 80,
(83, 'B'): 80,
(83, 'C'): 80,
(83, 'D'): 80,
(83, 'E'): 80,
(83, 'F'): 80,
(83, 'G'): 80,
(83, 'H'): 80,
(83, 'I'): 80,
(83, 'J'): 80,
(83, 'K'): 80,
(83, 'L'): 80,
(83, 'M'): 80,
(83, 'N'): 80,
(83, 'O'): 80,
(83, 'P'): 80,
(83, 'Q'): 80,
(83, 'R'): 80,
(83, 'S'): 80,
(83, 'T'): 80,
(83, 'U'): 80,
(83, 'V'): 80,
(83, 'W'): 80,
(83, 'X'): 80,
(83, 'Y'): 80,
(83, 'Z'): 80,
(83, '['): 80,
(83, '\\'): 80,
(83, ']'): 80,
(83, '^'): 80,
(83, '_'): 80,
(83, '`'): 80,
(83, 'a'): 80,
(83, 'b'): 80,
(83, 'c'): 80,
(83, 'd'): 80,
(83, 'e'): 80,
(83, 'f'): 80,
(83, 'g'): 80,
(83, 'h'): 80,
(83, 'i'): 80,
(83, 'j'): 80,
(83, 'k'): 80,
(83, 'l'): 80,
(83, 'm'): 80,
(83, 'n'): 80,
(83, 'o'): 80,
(83, 'p'): 80,
(83, 'q'): 80,
(83, 'r'): 80,
(83, 's'): 80,
(83, 't'): 80,
(83, 'u'): 80,
(83, 'v'): 80,
(83, 'w'): 80,
(83, 'x'): 80,
(83, 'y'): 80,
(83, 'z'): 80,
(83, '{'): 80,
(83, '|'): 80,
(83, '}'): 80,
(83, '~'): 80,
(83, '\x7f'): 80,
(83, '\x80'): 80,
(83, '\x81'): 80,
(83, '\x82'): 80,
(83, '\x83'): 80,
(83, '\x84'): 80,
(83, '\x85'): 80,
(83, '\x86'): 80,
(83, '\x87'): 80,
(83, '\x88'): 80,
(83, '\x89'): 80,
(83, '\x8a'): 80,
(83, '\x8b'): 80,
(83, '\x8c'): 80,
(83, '\x8d'): 80,
(83, '\x8e'): 80,
(83, '\x8f'): 80,
(83, '\x90'): 80,
(83, '\x91'): 80,
(83, '\x92'): 80,
(83, '\x93'): 80,
(83, '\x94'): 80,
(83, '\x95'): 80,
(83, '\x96'): 80,
(83, '\x97'): 80,
(83, '\x98'): 80,
(83, '\x99'): 80,
(83, '\x9a'): 80,
(83, '\x9b'): 80,
(83, '\x9c'): 80,
(83, '\x9d'): 80,
(83, '\x9e'): 80,
(83, '\x9f'): 80,
(83, '\xa0'): 80,
(83, '\xa1'): 80,
(83, '\xa2'): 80,
(83, '\xa3'): 80,
(83, '\xa4'): 80,
(83, '\xa5'): 80,
(83, '\xa6'): 80,
(83, '\xa7'): 80,
(83, '\xa8'): 80,
(83, '\xa9'): 80,
(83, '\xaa'): 80,
(83, '\xab'): 80,
(83, '\xac'): 80,
(83, '\xad'): 80,
(83, '\xae'): 80,
(83, '\xaf'): 80,
(83, '\xb0'): 80,
(83, '\xb1'): 80,
(83, '\xb2'): 80,
(83, '\xb3'): 80,
(83, '\xb4'): 80,
(83, '\xb5'): 80,
(83, '\xb6'): 80,
(83, '\xb7'): 80,
(83, '\xb8'): 80,
(83, '\xb9'): 80,
(83, '\xba'): 80,
(83, '\xbb'): 80,
(83, '\xbc'): 80,
(83, '\xbd'): 80,
(83, '\xbe'): 80,
(83, '\xbf'): 80,
(83, '\xc0'): 80,
(83, '\xc1'): 80,
(83, '\xc2'): 80,
(83, '\xc3'): 80,
(83, '\xc4'): 80,
(83, '\xc5'): 80,
(83, '\xc6'): 80,
(83, '\xc7'): 80,
(83, '\xc8'): 80,
(83, '\xc9'): 80,
(83, '\xca'): 80,
(83, '\xcb'): 80,
(83, '\xcc'): 80,
(83, '\xcd'): 80,
(83, '\xce'): 80,
(83, '\xcf'): 80,
(83, '\xd0'): 80,
(83, '\xd1'): 80,
(83, '\xd2'): 80,
(83, '\xd3'): 80,
(83, '\xd4'): 80,
(83, '\xd5'): 80,
(83, '\xd6'): 80,
(83, '\xd7'): 80,
(83, '\xd8'): 80,
(83, '\xd9'): 80,
(83, '\xda'): 80,
(83, '\xdb'): 80,
(83, '\xdc'): 80,
(83, '\xdd'): 80,
(83, '\xde'): 80,
(83, '\xdf'): 80,
(83, '\xe0'): 80,
(83, '\xe1'): 80,
(83, '\xe2'): 80,
(83, '\xe3'): 80,
(83, '\xe4'): 80,
(83, '\xe5'): 80,
(83, '\xe6'): 80,
(83, '\xe7'): 80,
(83, '\xe8'): 80,
(83, '\xe9'): 80,
(83, '\xea'): 80,
(83, '\xeb'): 80,
(83, '\xec'): 80,
(83, '\xed'): 80,
(83, '\xee'): 80,
(83, '\xef'): 80,
(83, '\xf0'): 80,
(83, '\xf1'): 80,
(83, '\xf2'): 80,
(83, '\xf3'): 80,
(83, '\xf4'): 80,
(83, '\xf5'): 80,
(83, '\xf6'): 80,
(83, '\xf7'): 80,
(83, '\xf8'): 80,
(83, '\xf9'): 80,
(83, '\xfa'): 80,
(83, '\xfb'): 80,
(83, '\xfc'): 80,
(83, '\xfd'): 80,
(83, '\xfe'): 80,
(83, '\xff'): 80,
(84, '\x00'): 80,
(84, '\x01'): 80,
(84, '\x02'): 80,
(84, '\x03'): 80,
(84, '\x04'): 80,
(84, '\x05'): 80,
(84, '\x06'): 80,
(84, '\x07'): 80,
(84, '\x08'): 80,
(84, '\t'): 80,
(84, '\n'): 80,
(84, '\x0b'): 80,
(84, '\x0c'): 80,
(84, '\r'): 80,
(84, '\x0e'): 80,
(84, '\x0f'): 80,
(84, '\x10'): 80,
(84, '\x11'): 80,
(84, '\x12'): 80,
(84, '\x13'): 80,
(84, '\x14'): 80,
(84, '\x15'): 80,
(84, '\x16'): 80,
(84, '\x17'): 80,
(84, '\x18'): 80,
(84, '\x19'): 80,
(84, '\x1a'): 80,
(84, '\x1b'): 80,
(84, '\x1c'): 80,
(84, '\x1d'): 80,
(84, '\x1e'): 80,
(84, '\x1f'): 80,
(84, ' '): 80,
(84, '!'): 80,
(84, '"'): 80,
(84, '#'): 80,
(84, '$'): 80,
(84, '%'): 80,
(84, '&'): 80,
(84, "'"): 80,
(84, '('): 80,
(84, ')'): 80,
(84, '*'): 83,
(84, '+'): 80,
(84, ','): 80,
(84, '-'): 80,
(84, '.'): 80,
(84, '/'): 85,
(84, '0'): 80,
(84, '1'): 80,
(84, '2'): 80,
(84, '3'): 80,
(84, '4'): 80,
(84, '5'): 80,
(84, '6'): 80,
(84, '7'): 80,
(84, '8'): 80,
(84, '9'): 80,
(84, ':'): 80,
(84, ';'): 80,
(84, '<'): 80,
(84, '='): 80,
(84, '>'): 80,
(84, '?'): 80,
(84, '@'): 80,
(84, 'A'): 80,
(84, 'B'): 80,
(84, 'C'): 80,
(84, 'D'): 80,
(84, 'E'): 80,
(84, 'F'): 80,
(84, 'G'): 80,
(84, 'H'): 80,
(84, 'I'): 80,
(84, 'J'): 80,
(84, 'K'): 80,
(84, 'L'): 80,
(84, 'M'): 80,
(84, 'N'): 80,
(84, 'O'): 80,
(84, 'P'): 80,
(84, 'Q'): 80,
(84, 'R'): 80,
(84, 'S'): 80,
(84, 'T'): 80,
(84, 'U'): 80,
(84, 'V'): 80,
(84, 'W'): 80,
(84, 'X'): 80,
(84, 'Y'): 80,
(84, 'Z'): 80,
(84, '['): 80,
(84, '\\'): 80,
(84, ']'): 80,
(84, '^'): 80,
(84, '_'): 80,
(84, '`'): 80,
(84, 'a'): 80,
(84, 'b'): 80,
(84, 'c'): 80,
(84, 'd'): 80,
(84, 'e'): 80,
(84, 'f'): 80,
(84, 'g'): 80,
(84, 'h'): 80,
(84, 'i'): 80,
(84, 'j'): 80,
(84, 'k'): 80,
(84, 'l'): 80,
(84, 'm'): 80,
(84, 'n'): 80,
(84, 'o'): 80,
(84, 'p'): 80,
(84, 'q'): 80,
(84, 'r'): 80,
(84, 's'): 80,
(84, 't'): 80,
(84, 'u'): 80,
(84, 'v'): 80,
(84, 'w'): 80,
(84, 'x'): 80,
(84, 'y'): 80,
(84, 'z'): 80,
(84, '{'): 80,
(84, '|'): 80,
(84, '}'): 80,
(84, '~'): 80,
(84, '\x7f'): 80,
(84, '\x80'): 80,
(84, '\x81'): 80,
(84, '\x82'): 80,
(84, '\x83'): 80,
(84, '\x84'): 80,
(84, '\x85'): 80,
(84, '\x86'): 80,
(84, '\x87'): 80,
(84, '\x88'): 80,
(84, '\x89'): 80,
(84, '\x8a'): 80,
(84, '\x8b'): 80,
(84, '\x8c'): 80,
(84, '\x8d'): 80,
(84, '\x8e'): 80,
(84, '\x8f'): 80,
(84, '\x90'): 80,
(84, '\x91'): 80,
(84, '\x92'): 80,
(84, '\x93'): 80,
(84, '\x94'): 80,
(84, '\x95'): 80,
(84, '\x96'): 80,
(84, '\x97'): 80,
(84, '\x98'): 80,
(84, '\x99'): 80,
(84, '\x9a'): 80,
(84, '\x9b'): 80,
(84, '\x9c'): 80,
(84, '\x9d'): 80,
(84, '\x9e'): 80,
(84, '\x9f'): 80,
(84, '\xa0'): 80,
(84, '\xa1'): 80,
(84, '\xa2'): 80,
(84, '\xa3'): 80,
(84, '\xa4'): 80,
(84, '\xa5'): 80,
(84, '\xa6'): 80,
(84, '\xa7'): 80,
(84, '\xa8'): 80,
(84, '\xa9'): 80,
(84, '\xaa'): 80,
(84, '\xab'): 80,
(84, '\xac'): 80,
(84, '\xad'): 80,
(84, '\xae'): 80,
(84, '\xaf'): 80,
(84, '\xb0'): 80,
(84, '\xb1'): 80,
(84, '\xb2'): 80,
(84, '\xb3'): 80,
(84, '\xb4'): 80,
(84, '\xb5'): 80,
(84, '\xb6'): 80,
(84, '\xb7'): 80,
(84, '\xb8'): 80,
(84, '\xb9'): 80,
(84, '\xba'): 80,
(84, '\xbb'): 80,
(84, '\xbc'): 80,
(84, '\xbd'): 80,
(84, '\xbe'): 80,
(84, '\xbf'): 80,
(84, '\xc0'): 80,
(84, '\xc1'): 80,
(84, '\xc2'): 80,
(84, '\xc3'): 80,
(84, '\xc4'): 80,
(84, '\xc5'): 80,
(84, '\xc6'): 80,
(84, '\xc7'): 80,
(84, '\xc8'): 80,
(84, '\xc9'): 80,
(84, '\xca'): 80,
(84, '\xcb'): 80,
(84, '\xcc'): 80,
(84, '\xcd'): 80,
(84, '\xce'): 80,
(84, '\xcf'): 80,
(84, '\xd0'): 80,
(84, '\xd1'): 80,
(84, '\xd2'): 80,
(84, '\xd3'): 80,
(84, '\xd4'): 80,
(84, '\xd5'): 80,
(84, '\xd6'): 80,
(84, '\xd7'): 80,
(84, '\xd8'): 80,
(84, '\xd9'): 80,
(84, '\xda'): 80,
(84, '\xdb'): 80,
(84, '\xdc'): 80,
(84, '\xdd'): 80,
(84, '\xde'): 80,
(84, '\xdf'): 80,
(84, '\xe0'): 80,
(84, '\xe1'): 80,
(84, '\xe2'): 80,
(84, '\xe3'): 80,
(84, '\xe4'): 80,
(84, '\xe5'): 80,
(84, '\xe6'): 80,
(84, '\xe7'): 80,
(84, '\xe8'): 80,
(84, '\xe9'): 80,
(84, '\xea'): 80,
(84, '\xeb'): 80,
(84, '\xec'): 80,
(84, '\xed'): 80,
(84, '\xee'): 80,
(84, '\xef'): 80,
(84, '\xf0'): 80,
(84, '\xf1'): 80,
(84, '\xf2'): 80,
(84, '\xf3'): 80,
(84, '\xf4'): 80,
(84, '\xf5'): 80,
(84, '\xf6'): 80,
(84, '\xf7'): 80,
(84, '\xf8'): 80,
(84, '\xf9'): 80,
(84, '\xfa'): 80,
(84, '\xfb'): 80,
(84, '\xfc'): 80,
(84, '\xfd'): 80,
(84, '\xfe'): 80,
(84, '\xff'): 80,
(85, '\x00'): 80,
(85, '\x01'): 80,
(85, '\x02'): 80,
(85, '\x03'): 80,
(85, '\x04'): 80,
(85, '\x05'): 80,
(85, '\x06'): 80,
(85, '\x07'): 80,
(85, '\x08'): 80,
(85, '\t'): 80,
(85, '\n'): 80,
(85, '\x0b'): 80,
(85, '\x0c'): 80,
(85, '\r'): 80,
(85, '\x0e'): 80,
(85, '\x0f'): 80,
(85, '\x10'): 80,
(85, '\x11'): 80,
(85, '\x12'): 80,
(85, '\x13'): 80,
(85, '\x14'): 80,
(85, '\x15'): 80,
(85, '\x16'): 80,
(85, '\x17'): 80,
(85, '\x18'): 80,
(85, '\x19'): 80,
(85, '\x1a'): 80,
(85, '\x1b'): 80,
(85, '\x1c'): 80,
(85, '\x1d'): 80,
(85, '\x1e'): 80,
(85, '\x1f'): 80,
(85, ' '): 80,
(85, '!'): 80,
(85, '"'): 80,
(85, '#'): 80,
(85, '$'): 80,
(85, '%'): 80,
(85, '&'): 80,
(85, "'"): 80,
(85, '('): 80,
(85, ')'): 80,
(85, '*'): 83,
(85, '+'): 80,
(85, ','): 80,
(85, '-'): 80,
(85, '.'): 80,
(85, '/'): 80,
(85, '0'): 80,
(85, '1'): 80,
(85, '2'): 80,
(85, '3'): 80,
(85, '4'): 80,
(85, '5'): 80,
(85, '6'): 80,
(85, '7'): 80,
(85, '8'): 80,
(85, '9'): 80,
(85, ':'): 80,
(85, ';'): 80,
(85, '<'): 80,
(85, '='): 80,
(85, '>'): 80,
(85, '?'): 80,
(85, '@'): 80,
(85, 'A'): 80,
(85, 'B'): 80,
(85, 'C'): 80,
(85, 'D'): 80,
(85, 'E'): 80,
(85, 'F'): 80,
(85, 'G'): 80,
(85, 'H'): 80,
(85, 'I'): 80,
(85, 'J'): 80,
(85, 'K'): 80,
(85, 'L'): 80,
(85, 'M'): 80,
(85, 'N'): 80,
(85, 'O'): 80,
(85, 'P'): 80,
(85, 'Q'): 80,
(85, 'R'): 80,
(85, 'S'): 80,
(85, 'T'): 80,
(85, 'U'): 80,
(85, 'V'): 80,
(85, 'W'): 80,
(85, 'X'): 80,
(85, 'Y'): 80,
(85, 'Z'): 80,
(85, '['): 80,
(85, '\\'): 80,
(85, ']'): 80,
(85, '^'): 80,
(85, '_'): 80,
(85, '`'): 80,
(85, 'a'): 80,
(85, 'b'): 80,
(85, 'c'): 80,
(85, 'd'): 80,
(85, 'e'): 80,
(85, 'f'): 80,
(85, 'g'): 80,
(85, 'h'): 80,
(85, 'i'): 80,
(85, 'j'): 80,
(85, 'k'): 80,
(85, 'l'): 80,
(85, 'm'): 80,
(85, 'n'): 80,
(85, 'o'): 80,
(85, 'p'): 80,
(85, 'q'): 80,
(85, 'r'): 80,
(85, 's'): 80,
(85, 't'): 80,
(85, 'u'): 80,
(85, 'v'): 80,
(85, 'w'): 80,
(85, 'x'): 80,
(85, 'y'): 80,
(85, 'z'): 80,
(85, '{'): 80,
(85, '|'): 80,
(85, '}'): 80,
(85, '~'): 80,
(85, '\x7f'): 80,
(85, '\x80'): 80,
(85, '\x81'): 80,
(85, '\x82'): 80,
(85, '\x83'): 80,
(85, '\x84'): 80,
(85, '\x85'): 80,
(85, '\x86'): 80,
(85, '\x87'): 80,
(85, '\x88'): 80,
(85, '\x89'): 80,
(85, '\x8a'): 80,
(85, '\x8b'): 80,
(85, '\x8c'): 80,
(85, '\x8d'): 80,
(85, '\x8e'): 80,
(85, '\x8f'): 80,
(85, '\x90'): 80,
(85, '\x91'): 80,
(85, '\x92'): 80,
(85, '\x93'): 80,
(85, '\x94'): 80,
(85, '\x95'): 80,
(85, '\x96'): 80,
(85, '\x97'): 80,
(85, '\x98'): 80,
(85, '\x99'): 80,
(85, '\x9a'): 80,
(85, '\x9b'): 80,
(85, '\x9c'): 80,
(85, '\x9d'): 80,
(85, '\x9e'): 80,
(85, '\x9f'): 80,
(85, '\xa0'): 80,
(85, '\xa1'): 80,
(85, '\xa2'): 80,
(85, '\xa3'): 80,
(85, '\xa4'): 80,
(85, '\xa5'): 80,
(85, '\xa6'): 80,
(85, '\xa7'): 80,
(85, '\xa8'): 80,
(85, '\xa9'): 80,
(85, '\xaa'): 80,
(85, '\xab'): 80,
(85, '\xac'): 80,
(85, '\xad'): 80,
(85, '\xae'): 80,
(85, '\xaf'): 80,
(85, '\xb0'): 80,
(85, '\xb1'): 80,
(85, '\xb2'): 80,
(85, '\xb3'): 80,
(85, '\xb4'): 80,
(85, '\xb5'): 80,
(85, '\xb6'): 80,
(85, '\xb7'): 80,
(85, '\xb8'): 80,
(85, '\xb9'): 80,
(85, '\xba'): 80,
(85, '\xbb'): 80,
(85, '\xbc'): 80,
(85, '\xbd'): 80,
(85, '\xbe'): 80,
(85, '\xbf'): 80,
(85, '\xc0'): 80,
(85, '\xc1'): 80,
(85, '\xc2'): 80,
(85, '\xc3'): 80,
(85, '\xc4'): 80,
(85, '\xc5'): 80,
(85, '\xc6'): 80,
(85, '\xc7'): 80,
(85, '\xc8'): 80,
(85, '\xc9'): 80,
(85, '\xca'): 80,
(85, '\xcb'): 80,
(85, '\xcc'): 80,
(85, '\xcd'): 80,
(85, '\xce'): 80,
(85, '\xcf'): 80,
(85, '\xd0'): 80,
(85, '\xd1'): 80,
(85, '\xd2'): 80,
(85, '\xd3'): 80,
(85, '\xd4'): 80,
(85, '\xd5'): 80,
(85, '\xd6'): 80,
(85, '\xd7'): 80,
(85, '\xd8'): 80,
(85, '\xd9'): 80,
(85, '\xda'): 80,
(85, '\xdb'): 80,
(85, '\xdc'): 80,
(85, '\xdd'): 80,
(85, '\xde'): 80,
(85, '\xdf'): 80,
(85, '\xe0'): 80,
(85, '\xe1'): 80,
(85, '\xe2'): 80,
(85, '\xe3'): 80,
(85, '\xe4'): 80,
(85, '\xe5'): 80,
(85, '\xe6'): 80,
(85, '\xe7'): 80,
(85, '\xe8'): 80,
(85, '\xe9'): 80,
(85, '\xea'): 80,
(85, '\xeb'): 80,
(85, '\xec'): 80,
(85, '\xed'): 80,
(85, '\xee'): 80,
(85, '\xef'): 80,
(85, '\xf0'): 80,
(85, '\xf1'): 80,
(85, '\xf2'): 80,
(85, '\xf3'): 80,
(85, '\xf4'): 80,
(85, '\xf5'): 80,
(85, '\xf6'): 80,
(85, '\xf7'): 80,
(85, '\xf8'): 80,
(85, '\xf9'): 80,
(85, '\xfa'): 80,
(85, '\xfb'): 80,
(85, '\xfc'): 80,
(85, '\xfd'): 80,
(85, '\xfe'): 80,
(85, '\xff'): 80,
(86, '0'): 10,
(86, '1'): 10,
(86, '2'): 10,
(86, '3'): 10,
(86, '4'): 10,
(86, '5'): 10,
(86, '6'): 10,
(86, '7'): 10,
(86, '8'): 10,
(86, '9'): 10,
(86, 'A'): 10,
(86, 'B'): 10,
(86, 'C'): 10,
(86, 'D'): 10,
(86, 'E'): 10,
(86, 'F'): 10,
(86, 'G'): 10,
(86, 'H'): 10,
(86, 'I'): 10,
(86, 'J'): 10,
(86, 'K'): 10,
(86, 'L'): 10,
(86, 'M'): 10,
(86, 'N'): 10,
(86, 'O'): 10,
(86, 'P'): 10,
(86, 'Q'): 10,
(86, 'R'): 10,
(86, 'S'): 10,
(86, 'T'): 10,
(86, 'U'): 10,
(86, 'V'): 10,
(86, 'W'): 10,
(86, 'X'): 10,
(86, 'Y'): 10,
(86, 'Z'): 10,
(86, '_'): 10,
(86, 'a'): 10,
(86, 'b'): 10,
(86, 'c'): 10,
(86, 'd'): 10,
(86, 'e'): 10,
(86, 'f'): 10,
(86, 'g'): 10,
(86, 'h'): 10,
(86, 'i'): 10,
(86, 'j'): 10,
(86, 'k'): 10,
(86, 'l'): 10,
(86, 'm'): 10,
(86, 'n'): 10,
(86, 'o'): 10,
(86, 'p'): 10,
(86, 'q'): 10,
(86, 'r'): 87,
(86, 's'): 10,
(86, 't'): 10,
(86, 'u'): 10,
(86, 'v'): 10,
(86, 'w'): 10,
(86, 'x'): 10,
(86, 'y'): 10,
(86, 'z'): 10,
(87, '0'): 10,
(87, '1'): 10,
(87, '2'): 10,
(87, '3'): 10,
(87, '4'): 10,
(87, '5'): 10,
(87, '6'): 10,
(87, '7'): 10,
(87, '8'): 10,
(87, '9'): 10,
(87, 'A'): 10,
(87, 'B'): 10,
(87, 'C'): 10,
(87, 'D'): 10,
(87, 'E'): 10,
(87, 'F'): 10,
(87, 'G'): 10,
(87, 'H'): 10,
(87, 'I'): 10,
(87, 'J'): 10,
(87, 'K'): 10,
(87, 'L'): 10,
(87, 'M'): 10,
(87, 'N'): 10,
(87, 'O'): 10,
(87, 'P'): 10,
(87, 'Q'): 10,
(87, 'R'): 10,
(87, 'S'): 10,
(87, 'T'): 10,
(87, 'U'): 10,
(87, 'V'): 10,
(87, 'W'): 10,
(87, 'X'): 10,
(87, 'Y'): 10,
(87, 'Z'): 10,
(87, '_'): 10,
(87, 'a'): 10,
(87, 'b'): 10,
(87, 'c'): 10,
(87, 'd'): 10,
(87, 'e'): 10,
(87, 'f'): 10,
(87, 'g'): 10,
(87, 'h'): 10,
(87, 'i'): 10,
(87, 'j'): 10,
(87, 'k'): 10,
(87, 'l'): 10,
(87, 'm'): 10,
(87, 'n'): 10,
(87, 'o'): 10,
(87, 'p'): 10,
(87, 'q'): 10,
(87, 'r'): 10,
(87, 's'): 10,
(87, 't'): 10,
(87, 'u'): 10,
(87, 'v'): 10,
(87, 'w'): 10,
(87, 'x'): 10,
(87, 'y'): 10,
(87, 'z'): 10,
(89, '='): 91,
(92, '<'): 96,
(94, '='): 95,
(98, '0'): 99,
(98, '1'): 99,
(98, '2'): 99,
(98, '3'): 99,
(98, '4'): 99,
(98, '5'): 99,
(98, '6'): 99,
(98, '7'): 99,
(98, '8'): 99,
(98, '9'): 99,
(99, '0'): 99,
(99, '1'): 99,
(99, '2'): 99,
(99, '3'): 99,
(99, '4'): 99,
(99, '5'): 99,
(99, '6'): 99,
(99, '7'): 99,
(99, '8'): 99,
(99, '9'): 99,
(99, 'E'): 100,
(99, 'e'): 100,
(100, '+'): 101,
(100, '-'): 101,
(100, '0'): 102,
(100, '1'): 102,
(100, '2'): 102,
(100, '3'): 102,
(100, '4'): 102,
(100, '5'): 102,
(100, '6'): 102,
(100, '7'): 102,
(100, '8'): 102,
(100, '9'): 102,
(101, '0'): 102,
(101, '1'): 102,
(101, '2'): 102,
(101, '3'): 102,
(101, '4'): 102,
(101, '5'): 102,
(101, '6'): 102,
(101, '7'): 102,
(101, '8'): 102,
(101, '9'): 102,
(102, '0'): 102,
(102, '1'): 102,
(102, '2'): 102,
(102, '3'): 102,
(102, '4'): 102,
(102, '5'): 102,
(102, '6'): 102,
(102, '7'): 102,
(102, '8'): 102,
(102, '9'): 102},
set([1,
2,
3,
4,
5,
6,
8,
9,
10,
11,
12,
14,
15,
16,
17,
18,
19,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
55,
58,
60,
61,
62,
63,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
81,
82,
85,
86,
87,
88,
89,
90,
91,
93,
94,
95,
96,
97,
99,
102]),
set([1,
2,
3,
4,
5,
6,
8,
9,
10,
11,
12,
14,
15,
16,
17,
18,
19,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
55,
58,
60,
61,
62,
63,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
81,
82,
85,
86,
87,
88,
89,
90,
91,
93,
94,
95,
96,
97,
99,
102]),
['0, 0, 0, 0, start|, 0, start|, 0, 0, 0, 0, 0, start|, 0, 0, 0, 0, start|, 0, 0, 0, 0, 0, 0, start|, 0, start|, 0, start|, 0, 0, start|, 0, 0, 0, 0, 0, 0, start|, 0, start|, start|, 0, 0, start|, 0, start|, start|, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0',
'IGNORE',
'(',
'ATOM',
'NUMBER',
'NUMBER',
'ATOM',
'1, 1, 1, 1',
'VAR',
'ATOM',
'ATOM',
'ATOM',
'|',
'0, start|, 0, final*, start*, 0, 1, final*, 0, final|, start|, 0, 1, final*, start*, 0, final*, 0, 1, final|, start|, 0, final*, start*, 0, final*',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'[',
'{',
'1, final*, 0, start|, 0, final*, start*, 0, final*, 0, final|, start|, 0, 1, final*, start*, 0, final*, 0, 1, final|, start|, 0, final*, start*, 0',
'ATOM',
'.',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'IGNORE',
')',
'ATOM',
'ATOM',
']',
'ATOM',
'ATOM',
'}',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'2',
'ATOM',
'2',
'2',
'ATOM',
'2',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'2',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'STRING',
'ATOM',
'ATOM',
'0, final*, start*, 2, final*, 0, start|, 0, final*, start*, final*, 0, final*, start*, 0, final*, 0, final|, start|, 0, 1, final*, start*, final*, 0, final*, start*, 0, final*, 0, 1, final|, start|, 0, final*, start*, final*, 0, final|, 1, final*, 0, start|, 0, final*, start*, final*, start*, 0, final*, 0, final*, 1, final|, final*, 0, start|, 0, final*, start*, final*, start*, 0, final*, 0, final*, final*, 0, final|, start|, 0, 1, final*, start*, final*, start*, 0, final*, 0, final*, 0, 1, final|, start|, 0, final*, start*, final*, start*, 0, final*, 0',
'ATOM',
'ATOM',
'0, start|, 0, final*, 1, final*, 0, final*, start*, 0, 1, 0, start|, 0, final*, 1, final*, 0, 1, final*, start*, 0, 1',
'final|, 1, final*, 0, start|, 0, final*, start*, final*, start*, final*, 0, final*, 0, 1, final*, start*, 0, final*, 0, final*',
'IGNORE',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'2',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'ATOM',
'1, 0',
'FLOAT',
'final|, 1, final*, 0, final|, start|, 0, start|, 0, 0, final*, 1, final|, final*, 0, final|, start|, 0, start|, 0, 0, final*',
'final|, 0, 1, final|, final*, final|, 0, final|, final*, 1',
'FLOAT']), {'IGNORE': None})
# generated code between this line and its other occurence
if __name__ == '__main__':
    # Regeneration driver (Python 2): rewrites THIS file in place, replacing
    # everything between the two occurrences of the lower-cased marker line
    # with a freshly generated repr of the parsers and the lexer tables.
    f = py.path.local(__file__)
    oldcontent = f.read()
    # The marker is spelled upper-case here so this literal does not itself
    # match when the file content is split on the (lower-cased) marker.
    s = "# GENERATED CODE BETWEEN THIS LINE AND ITS OTHER OCCURENCE\n".lower()
    # pre/after are the hand-written head and tail; gen (the old generated
    # middle) is discarded and rebuilt below.
    pre, gen, after = oldcontent.split(s)
    lexer, parser_fact, parser_query, basic_rules = make_all()
    newcontent = ("%s%s\nparser_fact = %r\nparser_query = %r\n%s\n"
                  "\n%s%s") % (
        pre, s, parser_fact, parser_query, lexer.get_dummy_repr(),
        s, after)
    # Echo the new content for inspection before overwriting the file.
    print newcontent
    f.write(newcontent)
|
"""
This module will test the start command
"""
import shutil
from pathlib import Path
from controller import colors
from controller.app import Configuration
from controller.deploy.docker import Docker
from tests import (
Capture,
create_project,
exec_command,
execute_outside,
init_project,
pull_images,
start_registry,
)
def test_all(capfd: Capture) -> None:
    """End-to-end test of the ``start`` command in both swarm and compose mode.

    Covers: refusing to run outside a project, registry checks (swarm only),
    invalid/sub-stack/full-stack deploys, and bind-volume auto-creation.
    """
    # "start" (and "stop", compose mode only) must refuse to run outside a project
    execute_outside(capfd, "start")
    if not Configuration.swarm_mode:
        execute_outside(capfd, "stop")

    project_name = "first"
    create_project(
        capfd=capfd,
        name=project_name,
        auth="neo4j",
        frontend="angular",
    )
    init_project(capfd)

    if Configuration.swarm_mode:
        # In swarm mode a local registry must be reachable before any deploy
        exec_command(
            capfd,
            "start",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        start_registry(capfd)

    exec_command(
        capfd,
        "start backend invalid",
        "No such service: invalid",
    )

    # Images are not pulled yet, so start must fail with a pull hint
    exec_command(
        capfd,
        "start backend",
        f"image, execute {colors.RED}rapydo pull backend",
    )

    pull_images(capfd)

    docker = Docker()

    if Configuration.swarm_mode:
        # Deploy a sub-stack
        exec_command(
            capfd,
            "start backend",
            "Enabled services: backend",
            "Stack started",
        )
        # Only backend is expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is None

        # Once started a stack in swarm mode, it's not possible
        # to re-deploy another stack
        # exec_command(
        #     capfd,
        #     "start",
        #     "A stack is already running",
        #     f"Stop it with {colors.RED}rapydo remove{colors.RESET} "
        #     "if you want to start a new stack",
        # )

        # Deploy an additional sub-stack
        exec_command(
            capfd,
            "start neo4j",
            "Enabled services: neo4j",
            "Stack started",
        )
        # In swarm mode new stack replaces the previous
        # => Only neo4j is expected to be running
        assert docker.get_container("backend") is None
        assert docker.get_container("neo4j") is not None

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        # Deploy the full stack
        exec_command(
            capfd,
            "start",
            "Stack started",
        )
        # Now both backend and neo4j are expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is not None

        # ############################
        # Verify bind volumes checks #
        # ############################

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        data_folder = Path("data", project_name)
        karma_folder = data_folder.joinpath("karma")

        # Delete data/project_name/karma and it will be recreated
        assert karma_folder.exists()
        shutil.rmtree(karma_folder)
        assert not karma_folder.exists()

        # set the data folder read only
        data_folder.chmod(0o550)

        # The missing folder can't be recreated due to permissions denied
        exec_command(
            capfd,
            "start frontend",
            "A bind folder is missing and can't be automatically created: ",
            f"/data/{project_name}/karma",
        )
        assert not karma_folder.exists()

        # Restore RW permissions
        data_folder.chmod(0o770)

        # With write access restored, the bind folder is recreated on start
        exec_command(
            capfd,
            "start frontend",
            "A bind folder was missing and was automatically created: ",
            f"/data/{project_name}/karma",
            "Stack started",
        )
        assert karma_folder.exists()
    else:
        # Deploy a sub-stack
        exec_command(
            capfd,
            "start backend",
            "Enabled services: backend",
            "Stack started",
        )
        # Only backend is expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is None

        # Deploy an additional sub-stack
        exec_command(
            capfd,
            "start neo4j",
            "Enabled services: neo4j",
            "Stack started",
        )
        # In compose mode additional stack are aggregated
        # => both backend and neo4j are expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is not None

        # exec_command(
        #     capfd,
        #     "start",
        #     "A stack is already running.",
        # )

        exec_command(
            capfd,
            "start",
            "Stack started",
        )
|
# Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import warnings
import apache_beam as beam
from klio_audio.transforms import io as aio
from klio_audio.transforms import audio
import transforms
# Third-party warnings are just noise in example-job output.
warnings.simplefilter("ignore")

# Loggers whose output is nondeterministic or machine-dependent (including
# klio.concurrency) are silenced so the job logs stay stable and readable.
loggers_to_mute = (
    "apache_beam.io.filebasedsink",
    "apache_beam.runners.worker.statecache",
    "apache_beam.runners.portability.fn_api_runner",
    "apache_beam.runners.portability.fn_api_runner_transforms",
    "apache_beam.internal.gcp.auth",
    "oauth2client.transport",
    "oauth2client.client",
    # The concurrency logs may be different for every machine, so let's
    # just turn them off
    "klio.concurrency",
)

for _noisy_logger_name in loggers_to_mute:
    logging.getLogger(_noisy_logger_name).setLevel(logging.ERROR)

# Keep klio's own logging verbose for debugging the example.
logging.getLogger("klio").setLevel(logging.DEBUG)
def run(in_pcol, job_config):
    """Build a Beam pipeline separating foreground from background audio.

    Applies nearest-neighbor filtering and soft masking to the STFT
    magnitude, then uploads three spectrogram plots (full, background,
    foreground) to GCS.

    Args:
        in_pcol: input PCollection of KlioMessages.
        job_config: klio job configuration (unused here; part of the klio
            ``run()`` contract expected by the runner).

    Returns:
        PCollection of the distinct uploaded plot paths.
    """
    # load 5 seconds of audio (starting at a 10s offset) and get its STFT
    stft = (
        in_pcol
        | aio.GcsLoadBinary()
        | audio.LoadAudio(offset=10, duration=5)
        | audio.GetSTFT()
    )

    # get magnitude of audio; with_outputs() exposes the tagged
    # "spectrogram" output used below
    magnitude = (
        stft | "Get magnitude" >> beam.ParDo(transforms.GetMagnitude()).with_outputs()
    )

    # map the result to a key (the KlioMessage element)
    # so we can group all results by key
    magnitude_key = (
        magnitude.spectrogram
        | "element to spec" >> beam.Map(transforms.create_key_from_element)
    )

    # get nearest neighbors and map the result to a key (the KlioMessage element)
    nn_filter = (
        magnitude.spectrogram
        | "Get nn filter" >> beam.ParDo(transforms.FilterNearestNeighbors())
        | "element to filter" >> beam.Map(transforms.create_key_from_element)
    )

    # map together the full magnitude with its filter by key (the KlioMessage element)
    merge = (
        {"full": magnitude_key, "nnfilter": nn_filter}
        | "merge" >> beam.CoGroupByKey()
    )

    # calc the difference between full magnitude and the filter
    net = merge | beam.Map(transforms.subtract_filter_from_full)

    # create a mask from the filter minus the difference of full & filter
    first_mask = (
        {"first": nn_filter, "second": net, "full": magnitude_key}
        | "first mask group" >> beam.CoGroupByKey()
        | "first mask" >> beam.ParDo(transforms.GetSoftMask(margin=2))
    )

    # create another mask from the difference of full & filter minus the filter
    second_mask = (
        {"first": net, "second": nn_filter, "full": magnitude_key}
        | "second mask group" >> beam.CoGroupByKey()
        | "second mask" >> beam.ParDo(transforms.GetSoftMask(margin=10))
    )

    # NOTE(review): the "Spectrogam"/"forground" typos below are in runtime
    # label/title strings; fixing them would rename pipeline steps, so they
    # are kept as-is.

    # plot the full magnitude spectrogram
    magnitude_out = (
        magnitude.spectrogram
        | "full spec" >> audio.GetSpec()
        | "plot full spec" >> audio.SpecToPlot(title="Full Spectrogam for {element}", y_axis="log")
        | "save full" >> aio.GcsUploadPlot(suffix="-full")
    )

    # plot the first mask (background) spectrogram
    background_out = (
        first_mask
        | "background spec" >> audio.GetSpec()
        | "plot background spec" >> audio.SpecToPlot(title="Background Spectrogam for {element}", y_axis="log")
        | "save background" >> aio.GcsUploadPlot(suffix="-background")
    )

    # plot the second mask (foreground) spectrogram
    foreground_out = (
        second_mask
        | "foreground spec" >> audio.GetSpec()
        | "plot forground spec" >> audio.SpecToPlot(title="Foreground Spectrogam for {element}", y_axis="log")
        | "save foreground" >> aio.GcsUploadPlot(suffix="-foreground")
    )

    # merge the three output branches and drop duplicate paths
    return (
        (magnitude_out, background_out, foreground_out)
        | "flatten output paths" >> beam.Flatten()
        | "remove dups" >> beam.Distinct()
    )
|
"""
Contains class that generates the 'locality.txt' file for any state.
locality.txt contains the following columns:
election_administration_id,
external_identifier_type,
external_identifier_othertype,
external_identifier_value,
name,
polling_location_ids,
state_id,
type,
other_type,
id
"""
import pandas as pd
import re
import config
class LocalityTxt(object):
"""#
"""
    def __init__(self, early_voting_df, state):
        """Store the source early-voting DataFrame and the state abbreviation.

        Args:
            early_voting_df: pandas DataFrame of early-voting locations.
            state: lower-case two-letter state abbreviation (key into
                config.fips_dict).
        """
        self.base_df = early_voting_df
        self.state = state
def create_election_administration_id(self, index):
"""Creates election_administration_ids by concatenating a prefix with an 'index_str' based on the Dataframe's
row index. '0s' are added, if necesary, to maintain a consistent id length. As currently designed the method
works up to index 9,999"""
return None
# if index <= 9:
# index_str = '000' + str(index)
# return prefix + index_str
# elif index in range(10,100):
# index_str = '00' + str(index)
# return prefix + index_str
# elif index in range(100, 1000):
# index_str = '0' + str(index)
# return prefix + index_str
# else:
# index_str = str(index)
# return prefix + index_str
def get_external_identifier_type(self):
"""#"""
return "ocd-id"
def get_external_identifier_othertype(self):
# create conditional when/if column is present
return ''
def get_external_identifier_value(self, external_identifier_value):
"""Extracts external identifier (ocd-division)."""
if external_identifier_value:
return external_identifier_value
else:
return ''
def create_name(self, index, division_description):
"""
Creates a name by concatenating the 'locality' (town name along with town or county designation)
with an 'index_str' based on the Dataframes row index.'0s' are added, if necesary, to
maintain a consistent id length.
"""
# Get locality(town or county), and remove state abbreviation.
if division_description:
locality = str(division_description[:-3].lower().replace(" ", "_"))
#print locality
else:
locality = ''
print 'Missing data at row ' + str(index) + '.'
# Add leading '0s' depending on index number.
if index <= 9:
index_str = '000' + str(index)
elif index in range(10,100):
index_str = '00' + str(index)
elif index in range(100, 1000):
index_str = '0' + str(index)
else:
index_str = str(index)
return locality + index_str
def create_polling_location_ids(self, polling_location_id):
"""
Creates polling_location_ids by concatenating 'poll' with an 'index_str' based on the Dataframe's row index.
'0s' are added, if necesary, to maintain a consistent id length.
"""
return polling_location_id
# if index <= 9:
# index_str = '000' + str(index)
# return 'poll' + index_str
# elif index in range(10, 100):
# index_str = '00' + str(index)
# return 'poll' + index_str
# elif index in range(100, 1000):
# index_str = '0' + str(index)
# return 'poll' + index_str
# elif index:
# index_str = str(index)
# return 'poll' + index_str
# else:
# return ''
def create_state_id(self):
"""Creates the state_id by matching a key in the state_dict and retrieving
and modifying its value. A '0' is added, if necessary, to maintain a
consistent id length.
"""
for key, value in config.fips_dict.iteritems():
if key == self.state:
state_num = value
if state_num <=9:
state_num = '0' + str(state_num)
else:
state_num = str(state_num)
return 'st' + state_num
def get_type(self):
# create conditional when/if column is present
return 'other'
def get_other_type(self):
# create conditional when/if column is present
return ''
def create_id(self, index):
"""Creates a sequential id by concatenating 'loc' with an 'index_str' based on the Dataframe's row index.
'0s' are added, if necesary, to maintain a consistent id length.
"""
if index <=9:
index_str = '000' + str(index)
return 'loc' + index_str
elif index in range(10,100):
index_str = '00' + str(index)
return 'loc' + index_str
elif index in range(100, 1000):
index_str = '0' + str(index)
return 'loc' + index_str
elif index:
index_str = str(index)
return 'loc' + index_str
else:
return ''
def build_locality_txt(self):
"""
New columns that match the 'locality.txt' template are inserted into the DataFrame, apply() is
used to run methods that generate the values for each row of the new columns.
"""
self.base_df['election_administration_id'] = self.base_df.apply(
lambda row: self.create_election_administration_id(row['index']), axis=1)
self.base_df['external_identifier_type'] = self.base_df.apply(
lambda row: self.get_external_identifier_type(), axis=1)
self.base_df['external_identifier_othertype'] = self.base_df.apply(
lambda row: self.get_external_identifier_othertype(), axis=1)
self.base_df['external_identifier_value'] = self.base_df.apply(
lambda row: self.get_external_identifier_value(row['ocd_division']), axis=1)
self.base_df['name'] = self.base_df.apply(
lambda row: self.create_name(row['index'], row['division_description']), axis=1)
self.base_df['polling_location_ids'] = self.base_df.apply(
lambda row: self.create_polling_location_ids(row['polling_location_id']), axis=1)
self.base_df['state_id'] = self.base_df.apply(
lambda row: self.create_state_id(), axis=1)
self.base_df['type'] = self.base_df.apply(
lambda row: self.get_type(), axis=1)
self.base_df['other_type'] = self.base_df.apply(
lambda row: self.get_other_type(), axis=1)
self.base_df['id'] = self.base_df.apply(
lambda row: self.create_id(row['index']), axis=1)
return self.base_df
# def group_polling_location_ids(self, frame):
#frame = self.build_locality_txt()
# return pd.concat(g for _, g in frame.groupby("external_identifier_value") if len(g) > 1)
#return frame.groupby('external_identifier_value')
# def dedupe(self, dupe):
# """#"""
# return dupe.drop_duplicates(subset='external_identifier_value')
def final_build(self):
loc = self.build_locality_txt()
#print loc
# Group by county.
loc = loc.groupby(['external_identifier_value']).agg(lambda x: ' '.join(set(x))).reset_index()
#print loc
loc['name'] = loc['name'].apply(lambda x: ''.join(x.split(' ')[0]))
loc['grouped_index'] = loc.index + 1
loc['id'] = loc.apply(
lambda row: self.create_id(row['grouped_index']), axis=1)
# reorder columns
cols =['election_administration_id', 'external_identifier_type', 'external_identifier_othertype',
'external_identifier_value', 'name', 'polling_location_ids', 'state_id', 'type',
'other_type', 'grouped_index', 'id']
final = loc.reindex(columns=cols)
#print final
final.drop(['grouped_index'], inplace=True, axis=1)
return final
def write_locality_txt(self):
"""Drops base DataFrame columns then writes final dataframe to text or csv file"""
loc = self.final_build()
#print loc
loc.to_csv(config.output + 'locality.txt', index=False, encoding='utf-8') # send to txt file
loc.to_csv(config.output + 'locality.csv', index=False, encoding='utf-8') # send to csv file
if __name__ == '__main__':
    # Ad-hoc driver: read the intermediate CSV for one state and emit
    # locality.txt / locality.csv into config.output.
    state_file = 'intermediate_doc.csv'
    # NOTE(review): machine-specific absolute path; parameterize before reuse.
    early_voting_file = "/home/acg/democracyworks/hand-collection-to-vip/new_jersey/output/" + state_file
    # Explicit column list for the intermediate file (its header row is skipped).
    colnames = ['office_name', 'official_title', 'ocd_division', 'division_description', 'homepage_url', 'phone',
                'email', 'street', 'directions', 'city', 'state', 'zip', 'start_time', 'end_time', 'start_date',
                'end_date', 'must_apply_for_mail_ballot', 'notes', 'index', 'address_line', 'hours', 'photo_uri',
                'hours_open_id', 'is_drop_box', 'is_early_voting', 'latitude', 'longitude', 'latlng_source', 'polling_location_id']
    early_voting_df = pd.read_csv(early_voting_file, names=colnames, encoding='utf-8', skiprows=1, delimiter=',')
    early_voting_df['index'] = early_voting_df.index +1 # offsets zero based index so it starts at 1 for ids
    #print early_voting_df

    lt = LocalityTxt(early_voting_df, config.state)
    lt.write_locality_txt()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import struct
import logging
from util.compatibility import text_
logger = logging.getLogger('util')
class IPAddresss:
    """Reader for a GBK-encoded IP-geolocation database file (QQWry-style
    layout, judging from the 7-byte index records and GBK strings).

    NOTE(review): the class name carries a typo (three trailing 's') but is
    kept for API compatibility. The database file handle opened in __init__
    is never closed explicitly.
    """

    def __init__(self, ipdbFile):
        self.ipdb = open(ipdbFile, "rb")
        # Header: two little-endian uint32 offsets of the first and last
        # 7-byte index records.
        # NOTE(review): local name 'str' shadows the builtin throughout.
        str = self.ipdb.read(8)
        (self.firstIndex, self.lastIndex) = struct.unpack('II', str)
        # Each index record is 7 bytes: 4-byte start IP + 3-byte offset.
        self.indexCount = int((self.lastIndex - self.firstIndex) / 7 + 1)
        # print self.getVersion(), u" total records: %d " % (self.indexCount)

    def getVersion(self):
        # The database stores its version string under the reserved
        # 255.255.255.0 pseudo-address.
        s = self.getIpAddr(0xffffff00)
        return s

    def getAreaAddr(self, offset=0):
        """Read the area (second) string at offset, following one level of
        redirection when the first byte is a 0x01/0x02 mode flag."""
        if offset:
            self.ipdb.seek(offset)
        str = self.ipdb.read(1)
        (byte,) = struct.unpack('B', str)
        if byte == 0x01 or byte == 0x02:
            # Mode byte: the next 3 bytes are a pointer to the real string.
            p = self.getLong3()
            if p:
                return self.getString(p)
            else:
                return ""
        else:
            # Not a mode byte: rewind one byte and read the string in place.
            self.ipdb.seek(-1, 1)
        return self.getString(offset)

    def getAddr(self, offset, ip=0):
        """Return "country area" for the record whose end-IP sits at offset.

        Handles the three record layouts: full redirect (0x01), country
        redirect (0x02), and in-place strings.
        """
        self.ipdb.seek(offset + 4)  # skip the 4-byte end IP
        countryAddr = text_("")
        areaAddr = text_("")
        str = self.ipdb.read(1)
        (byte,) = struct.unpack('B', str)
        if byte == 0x01:
            # Whole record redirected elsewhere.
            countryOffset = self.getLong3()
            self.ipdb.seek(countryOffset)
            str = self.ipdb.read(1)
            (b,) = struct.unpack('B', str)
            if b == 0x02:
                # Country string redirected again; area follows the pointer.
                countryAddr = self.getString(self.getLong3())
                self.ipdb.seek(countryOffset + 4)
            else:
                countryAddr = self.getString(countryOffset)
            areaAddr = self.getAreaAddr()
        elif byte == 0x02:
            # Only the country string is redirected.
            countryAddr = self.getString(self.getLong3())
            areaAddr = self.getAreaAddr(offset + 8)
        else:
            # Both strings stored in place, back to back.
            countryAddr = self.getString(offset + 4)
            areaAddr = self.getAreaAddr()
        return countryAddr + text_(" ") + areaAddr

    def dump(self, first, last):
        """Log index entries [first, last) as "index ip address"."""
        if last > self.indexCount:
            last = self.indexCount
        for index in range(first, last):
            offset = self.firstIndex + index * 7
            self.ipdb.seek(offset)
            buf = self.ipdb.read(7)
            # 4-byte start IP + 3-byte (split 2+1) record offset.
            (ip, of1, of2) = struct.unpack("IHB", buf)
            address = self.getAddr(of1 + (of2 << 16))
            # convert GBK to UTF-8
            address = text_(address, 'gbk').encode("utf-8")
            logger.info("%d %s %s" % (index, self.ip2str(ip), address))

    def setIpRange(self, index):
        """Load index entry `index` into curStartIp/curEndIp/curEndIpOffset."""
        offset = self.firstIndex + index * 7
        self.ipdb.seek(offset)
        buf = self.ipdb.read(7)
        (self.curStartIp, of1, of2) = struct.unpack("IHB", buf)
        self.curEndIpOffset = of1 + (of2 << 16)
        self.ipdb.seek(self.curEndIpOffset)
        buf = self.ipdb.read(4)
        (self.curEndIp,) = struct.unpack("I", buf)

    def getIpAddr(self, ip):
        """Binary-search the index for `ip` and return its location string."""
        L = 0
        R = self.indexCount - 1
        while L < R - 1:
            M = int((L + R) / 2)
            self.setIpRange(M)
            if ip == self.curStartIp:
                L = M
                break
            if ip > self.curStartIp:
                L = M
            else:
                R = M
        self.setIpRange(L)
        # version information, 255.255.255.X, urgy but useful
        if ip & 0xffffff00 == 0xffffff00:
            self.setIpRange(R)
        if self.curStartIp <= ip <= self.curEndIp:
            address = self.getAddr(self.curEndIpOffset)
            # convert GBK to UTF-8
            address = text_(address)
        else:
            # literal means "address for this IP not found"
            address = text_("未找到该IP的地址")
        return address

    def getIpRange(self, ip):
        """Return "start - end" of the range containing `ip` (dotted quads)."""
        self.getIpAddr(ip)
        range = self.ip2str(self.curStartIp) + ' - ' \
            + self.ip2str(self.curEndIp)
        return range

    def getString(self, offset=0):
        """Read a NUL-terminated GBK string at offset and decode it."""
        if offset:
            self.ipdb.seek(offset)
        str = b''
        ch = self.ipdb.read(1)
        (byte,) = struct.unpack('B', ch)
        while byte != 0:
            str += ch
            ch = self.ipdb.read(1)
            (byte,) = struct.unpack('B', ch)
        return str.decode('gbk')

    def ip2str(self, ip):
        # uint32 -> dotted-quad string.
        return str(ip >> 24) + '.' + str((ip >> 16) & 0xff) + '.' + str((ip >> 8) & 0xff) + '.' + str(ip & 0xff)

    def str2ip(self, s):
        # Dotted-quad string -> host-order uint32 (byte-swapped from
        # inet_aton's network order).
        (ip,) = struct.unpack('I', socket.inet_aton(s))
        return ((ip >> 24) & 0xff) | ((ip & 0xff) << 24) | ((ip >> 8) & 0xff00) | ((ip & 0xff00) << 8)

    def getLong3(self, offset=0):
        """Read a 3-byte little-endian unsigned integer (file offset)."""
        if offset:
            self.ipdb.seek(offset)
        str = self.ipdb.read(3)
        (a, b) = struct.unpack('HB', str)
        return (b << 16) + a
|
import torch
from torch import optim
import torch.nn as nn
import numpy as np
import logging
from deeprobust.image.attack.base_attack import BaseAttack
from deeprobust.image.utils import onehot_like
from deeprobust.image.optimizer import AdamOptimizer
class CarliniWagner(BaseAttack):
    """
    C&W attack is an effective method to calculate high-confidence adversarial examples.

    References
    ----------
    .. [1] Carlini, N., & Wagner, D. (2017, May). Towards evaluating the robustness of neural networks. https://arxiv.org/pdf/1608.04644.pdf

    This reimplementation is based on https://github.com/kkew3/pytorch-cw2
    Copyright 2018 Kaiwen Wu

    Examples
    --------
    >>> from deeprobust.image.attack.cw import CarliniWagner
    >>> from deeprobust.image.netmodels.CNN import Net
    >>> from deeprobust.image.config import attack_params
    >>> model = Net()
    >>> model.load_state_dict(torch.load("./trained_models/MNIST_CNN_epoch_20.pt", map_location = torch.device('cuda')))
    >>> model.eval()
    >>> x,y = datasets.MNIST()
    >>> attack = CarliniWagner(model, device='cuda')
    >>> AdvExArray = attack.generate(x, y, target_label = 1, classnum = 10, **attack_params['CW_MNIST'])
    """

    def __init__(self, model, device='cuda'):
        super(CarliniWagner, self).__init__(model, device)
        self.model = model
        self.device = device

    def generate(self, image, label, target_label, **kwargs):
        """
        Call this function to generate adversarial examples.

        Parameters
        ----------
        image :
            original image
        label :
            original label of the image (stored; the optimization itself
            drives the prediction toward ``target_label``)
        target_label :
            label the adversarial example should be classified as
        kwargs :
            user defined parameters (see parse_params)
        """
        assert self.check_type_device(image, label)
        assert self.parse_params(**kwargs)
        self.target = target_label
        # BUGFIX: the original passed self.clip_min for BOTH the clip_max and
        # clip_min positional slots of cw(); pass clip_max first as declared.
        return self.cw(self.model,
                       self.image,
                       self.label,
                       self.target,
                       self.confidence,
                       self.clip_max,
                       self.clip_min,
                       self.max_iterations,
                       self.initial_const,
                       self.binary_search_steps,
                       self.learning_rate
                       )

    def parse_params(self,
                     classnum=10,
                     confidence=1e-4,
                     clip_max=1,
                     clip_min=0,
                     max_iterations=1000,
                     initial_const=1e-2,
                     binary_search_steps=5,
                     learning_rate=0.00001,
                     abort_early=True):
        """
        Parse the user defined parameters.

        Parameters
        ----------
        classnum :
            number of classes
        confidence :
            extra margin required between target and runner-up logits
        clip_max :
            maximum pixel value
        clip_min :
            minimum pixel value
        max_iterations :
            maximum number of optimization iterations per search step
        initial_const :
            initialization of the binary-searched trade-off constant c
        binary_search_steps :
            step number of binary search over c
        learning_rate :
            learning rate for the Adam updates
        abort_early :
            Set abort_early = True to allow early stop
        """
        self.classnum = classnum
        self.confidence = confidence
        self.clip_max = clip_max
        self.clip_min = clip_min
        self.max_iterations = max_iterations
        self.initial_const = initial_const
        self.binary_search_steps = binary_search_steps
        self.learning_rate = learning_rate
        self.abort_early = abort_early
        return True

    def cw(self, model, image, label, target, confidence, clip_max, clip_min, max_iterations, initial_const, binary_search_steps, learning_rate):
        """Run the attack: binary-search the trade-off constant c, and for
        each c optimize the perturbation in tanh (attack) space with Adam."""
        # Map the input into tanh space so the box constraint always holds.
        img_tanh = self.to_attack_space(image.cpu())
        img_ori, _ = self.to_model_space(img_tanh)
        img_ori = img_ori.to(self.device)

        # binary search initialization
        c = initial_const
        c_low = 0
        c_high = np.inf
        found_adv = False
        last_loss = np.inf

        for step in range(binary_search_steps):
            # initialize w : perturbed image in tanh space
            w = torch.from_numpy(img_tanh.numpy())
            optimizer = AdamOptimizer(img_tanh.shape)
            is_adversarial = False

            for iteration in range(max_iterations):
                # candidate adversarial example back in model space
                img_adv, adv_grid = self.to_model_space(w)
                img_adv = img_adv.to(self.device)
                img_adv.requires_grad = True

                # pending success (logits are recomputed inside pending_f)
                is_adversarial = self.pending_f(img_adv)

                # loss and its gradient w.r.t. the model-space image
                loss, loss_grad = self.loss_function(
                    img_adv, c, self.target, img_ori, self.confidence, self.clip_min, self.clip_max
                )

                # chain rule: gradient w.r.t. w via the tanh Jacobian
                gradient = adv_grid.to(self.device) * loss_grad.to(self.device)
                w = w + torch.from_numpy(optimizer(gradient.cpu().detach().numpy(), learning_rate)).float()

                if is_adversarial:
                    found_adv = True

            # binary search on c: shrink when an adversarial was found,
            # otherwise grow (x10 until an upper bound exists)
            if found_adv:
                c_high = c
            else:
                c_low = c
            if c_high == np.inf:
                c *= 10
            else:
                c = (c_high + c_low) / 2

            if (step % 10 == 0):
                print("iteration:{:.0f},loss:{:.4f}".format(step, loss))

            # abort early once the loss stops improving
            if (self.abort_early == True and (step % 10) == 0 and step > 100):
                print("early abortion?", loss, last_loss)
                if not (loss <= 0.9999 * last_loss):
                    break
            last_loss = loss

        return img_adv.detach()

    def loss_function(
            self, x_p, const, target, reconstructed_original, confidence, min_, max_):
        """Returns the loss and the gradient of the loss w.r.t. x,
        assuming that logits = model(x)."""
        ## get the output of model before softmax
        x_p.requires_grad = True
        logits = self.model.get_logits(x_p).to(self.device)

        ## find the largest class except the target class
        targetlabel_mask = (torch.from_numpy(onehot_like(np.zeros(self.classnum), target))).double()
        secondlargest_mask = (torch.from_numpy(np.ones(self.classnum)) - targetlabel_mask).to(self.device)
        secondlargest = np.argmax((logits.double() * secondlargest_mask).cpu().detach().numpy())

        is_adv_loss = logits[0][secondlargest] - logits[0][target]

        # is_adv is True as soon as the is_adv_loss goes below 0
        # but sometimes we want additional confidence
        is_adv_loss += confidence

        if is_adv_loss == 0:
            is_adv_loss_grad = 0
        else:
            is_adv_loss.backward()
            is_adv_loss_grad = x_p.grad

        is_adv_loss = max(0, is_adv_loss)

        # L2 distance term, normalized by the dynamic range of the pixels
        s = max_ - min_
        squared_l2_distance = np.sum(((x_p - reconstructed_original) ** 2).cpu().detach().numpy()) / s ** 2
        total_loss = squared_l2_distance + const * is_adv_loss

        squared_l2_distance_grad = (2 / s ** 2) * (x_p - reconstructed_original)
        #print(is_adv_loss_grad)
        total_loss_grad = squared_l2_distance_grad + const * is_adv_loss_grad
        return total_loss, total_loss_grad

    def pending_f(self, x_p):
        """Return True when the target logit beats every other logit by at
        least self.confidence (i.e. the example is already adversarial)."""
        targetlabel_mask = torch.from_numpy(onehot_like(np.zeros(self.classnum), self.target))
        secondlargest_mask = torch.from_numpy(np.ones(self.classnum)) - targetlabel_mask
        targetlabel_mask = targetlabel_mask.to(self.device)
        secondlargest_mask = secondlargest_mask.to(self.device)

        Zx_i = np.max((self.model.get_logits(x_p).double().to(self.device) * secondlargest_mask).cpu().detach().numpy())
        Zx_t = np.max((self.model.get_logits(x_p).double().to(self.device) * targetlabel_mask).cpu().detach().numpy())

        if (Zx_i - Zx_t < - self.confidence):
            return True
        else:
            return False

    def to_attack_space(self, x):
        """Map x from model space [clip_min, clip_max] to the unconstrained
        tanh (attack) space."""
        x = x.detach()
        # map from [min_, max_] to [-1, +1]
        a = (self.clip_min + self.clip_max) / 2
        b = (self.clip_max - self.clip_min) / 2
        x = (x - a) / b

        # from [-1, +1] to approx. (-1, +1), keeping arctanh finite
        x = x * 0.999999

        # from (-1, +1) to (-inf, +inf)
        return np.arctanh(x)

    def to_model_space(self, x):
        """Transforms an input from the attack space
        to the model space. This transformation and
        the returned gradient are elementwise."""
        # from (-inf, +inf) to (-1, +1)
        x = np.tanh(x)
        grad = 1 - np.square(x)  # d tanh / dx, reused by the chain rule in cw()

        # map from (-1, +1) to (min_, max_)
        a = (self.clip_min + self.clip_max) / 2
        b = (self.clip_max - self.clip_min) / 2
        x = x * b + a
        grad = grad * b
        return x, grad
|
import numpy as np
class LogisticRegression:
    """Binary logistic-regression classifier trained by batch gradient descent.

    Attributes
    ----------
    lr : float
        learning rate used in each gradient step
    n_iters : int
        number of full-batch gradient-descent iterations
    weights, bias :
        model parameters; None until fit() is called
    """

    def __init__(self, lr=0.001, n_iters=1000):
        self.lr = lr
        self.n_iters = n_iters
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        """Estimate weights and bias from (n_samples, n_features) data X and
        0/1 labels y using full-batch gradient descent."""
        n_samples, n_features = X.shape
        # random initialization of the weight vector, zero bias
        self.weights = np.random.randn(n_features)
        self.bias = 0
        step = self.lr / n_samples
        for _ in range(self.n_iters):
            # forward pass: linear score -> sigmoid probability
            probs = self._sigmoid(np.dot(X, self.weights) + self.bias)
            residual = probs - y
            # gradient step on both parameters
            self.weights = self.weights - step * np.dot(X.T, residual)
            self.bias = self.bias - step * np.sum(residual)

    def predict(self, X):
        """Return hard 0/1 labels for each row of X (threshold 0.5)."""
        probs = self._sigmoid(np.dot(X, self.weights) + self.bias)
        labels = [1 if p > 0.5 else 0 for p in probs]
        return np.array(labels)

    def _sigmoid(self, z):
        # logistic function, applied elementwise
        return 1 / (1 + np.exp(-z))
|
"""Change coordinates"""
from numpy import arccos, arctan2, hypot, sqrt
def to_polar(coo_x, coo_y):
    """
    r, θ = to_polar(x, y)

    Convert Cartesian coordinates to polar coordinates.

    Parameters
    ----------
    coo_x : array_like
        first Cartesian coordinate
    coo_y : array_like
        second Cartesian coordinate

    Returns
    -------
    r : ndarray
        radial coordinate
    θ : ndarray
        angular coordinate, in (-π, π]
    """
    radius = hypot(coo_x, coo_y)
    angle = arctan2(coo_y, coo_x)
    return (radius, angle)
def to_spheric(coo_x, coo_y, coo_z):
    """
    r, θ, φ = to_spheric(x, y, z)

    Convert Cartesian coordinates to spherical coordinates.

    Parameters
    ----------
    coo_x : array_like
        first Cartesian coordinate
    coo_y : array_like
        second Cartesian coordinate
    coo_z : array_like
        third Cartesian coordinate

    Returns
    -------
    r : ndarray
        radial distance
    θ : ndarray
        azimuthal angle
    φ : ndarray
        polar angle
    """
    radius = sqrt(coo_x * coo_x + coo_y * coo_y + coo_z * coo_z)
    azimuth = arctan2(coo_y, coo_x)
    polar = arccos(coo_z / radius)
    return (radius, azimuth, polar)
|
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou, IECore, IECoreHoudini
'''
This contains utility methods for doing useful stuff in the IECoreHoudini
python module.
'''
# returns an instance of a procedural loaded using the defaultProceduralLoader
# returns an instance of a procedural loaded using the defaultProceduralLoader
def proc(type, ver):
    # Load class `type` at version `ver` via Cortex's default procedural
    # loader and instantiate it immediately (note the trailing call).
    return IECore.ClassLoader.defaultProceduralLoader().load(type,ver)()
# returns an instance of an op loaded using the defaultOpLoader
# returns an instance of an op loaded using the defaultOpLoader
def op(type, ver):
    # Load class `type` at version `ver` via Cortex's default op loader and
    # instantiate it immediately (note the trailing call).
    return IECore.ClassLoader.defaultOpLoader().load(type,ver)()
# sets a houdini parameter based on the value from it's corresponding cortex parameter
# sets a houdini parameter based on the value from its corresponding cortex parameter
def setHoudiniParm( node, p ):
    """Copy a Cortex parameter's value onto its matching Houdini parm tuple.

    The Houdini parameter is assumed to be named "parm_<cortex name>".
    Scalar Cortex types are wrapped in a one-element list; compound types
    (vectors, colours, matrices, boxes) are expanded with list(). Unknown
    parameter types are silently ignored, matching the original if-chain.
    """
    # Cortex types whose value is set as a single-element tuple.
    scalar_types = (
        IECore.TypeId.IntParameter,
        IECore.TypeId.FloatParameter,
        IECore.TypeId.DoubleParameter,
        IECore.TypeId.BoolParameter,
        IECore.TypeId.StringParameter,
        IECore.TypeId.PathParameter,
        IECore.TypeId.DirNameParameter,
        IECore.TypeId.FileNameParameter,
        IECore.TypeId.FileSequenceParameter,
    )
    # Cortex types whose value is a sequence and must be expanded.
    sequence_types = (
        IECore.TypeId.V2iParameter,
        IECore.TypeId.V3iParameter,
        IECore.TypeId.V2fParameter,
        IECore.TypeId.V3fParameter,
        IECore.TypeId.V2dParameter,
        IECore.TypeId.V3dParameter,
        IECore.TypeId.Color3fParameter,
        IECore.TypeId.Color4fParameter,
        IECore.TypeId.M44fParameter,
        IECore.TypeId.M44dParameter,
        IECore.TypeId.Box2iParameter,
        IECore.TypeId.Box2fParameter,
        IECore.TypeId.Box2dParameter,
        IECore.TypeId.Box3iParameter,
        IECore.TypeId.Box3fParameter,
        IECore.TypeId.Box3dParameter,
    )
    value = p.getValue().value
    type_id = p.typeId()
    if type_id in scalar_types:
        node.parmTuple( "parm_%s" % p.name ).set( [value] )
    elif type_id in sequence_types:
        node.parmTuple( "parm_%s" % p.name ).set( list(value) )
# updates all the houdini parameters based on an Op/Procedural's parameter values
# updates all the houdini parameters based on an Op/Procedural's parameter values
def syncSopParametersWithProcedural(n):
    # Push every parameter value of the held procedural onto node n's
    # matching "parm_<name>" Houdini parameters (skipping missing parms).
    fn = IECoreHoudini.FnProceduralHolder( n )
    parms = fn.getParameterised().parameters().values()
    for p in parms:
        if n.parm("parm_%s"%p.name):
            setHoudiniParm( n, p )
def syncSopParametersWithOp(n):
    # Same as syncSopParametersWithProcedural, but for an op holder node.
    fn = IECoreHoudini.FnOpHolder( n )
    parms = fn.getParameterised().parameters().values()
    for p in parms:
        if n.parm("parm_%s"%p.name):
            setHoudiniParm( n, p )
# reloads a procedural based on the values of the type/version parameters
# \todo: this can be combined with the reloadOp code
# reloads a procedural based on the values of the type/version parameters
# \todo: this can be combined with the reloadOp code
def reloadProcedural():
    """Re-load the procedural held by the current node, preserving the
    existing parameter values across the interface rebuild."""
    node = hou.node(".")
    cl_type = node.evalParm("__opType")
    cl_version = node.evalParm("__opVersion")
    # nothing to reload if the node has no type/version set
    if cl_type == "" or cl_version == "":
        return
    cl_version = int(cl_version)
    holder = IECoreHoudini.FnProceduralHolder(node)
    IECore.ClassLoader.defaultProceduralLoader().refresh()
    parameterised = IECoreHoudini.proc( cl_type, cl_version )
    # cache the current parameter values before rebuilding the interface
    saved_values = dict( (p.name, p.getValue().value)
                         for p in holder.getParameterised().parameters().values() )
    # reload parameter interface
    holder.setParameterised(parameterised)
    # restore the cached values onto whatever parms still exist
    for name in saved_values.keys():
        hparm = node.parm("parm_%s" % name)
        if hparm:
            hparm.set( saved_values[name] )
# reloads an op based on the values of the type/version parameters
# \todo: this can be combined with the reloadProc code
# reloads an op based on the values of the type/version parameters
# \todo: this can be combined with the reloadProc code
def reloadOp():
    """Re-load the op held by the current node, preserving the existing
    parameter values across the interface rebuild."""
    node = hou.node(".")
    cl_type = node.evalParm("__opType")
    cl_version = node.evalParm("__opVersion")
    # nothing to reload if the node has no type/version set
    if cl_type == "" or cl_version == "":
        return
    cl_version = int(cl_version)
    holder = IECoreHoudini.FnOpHolder(node)
    IECore.ClassLoader.defaultOpLoader().refresh()
    parameterised = IECoreHoudini.op( cl_type, cl_version )
    # cache the current parameter values before rebuilding the interface
    saved_values = dict( (p.name, p.getValue().value)
                         for p in holder.getParameterised().parameters().values() )
    # reload parameter interface
    holder.setParameterised(parameterised)
    # restore the cached values onto whatever parms still exist
    for name in saved_values.keys():
        hparm = node.parm("parm_%s" % name)
        if hparm:
            hparm.set( saved_values[name] )
|
import heapq
import weakref
import numpy
from chainer import cuda
class Variable(object):
"""Array with a structure to keep track of computation.
Every variable holds a data array of type either :class:`numpy.ndarray` or
:class:`cupy.ndarray`.
A Variable object may be constructed in two ways: by the user or by some
function. When a variable is created by some function as one of its
outputs, the variable holds a reference to that function. This reference is
used in error backpropagation (a.k.a. backprop). It is also used in
*backward unchaining*. A variable that does not hold a reference to its
creator is called a *root* variable. A variable is root if it is created by
the user, or if the reference is deleted by :meth:`unchain_backward`.
Users can disable this chaining behavior by setting the volatile flag for
the initial variables. When a function gets volatile variables as its
inputs, the output variables do not hold references to the function. This
acts like unchaining on every function application.
Attributes:
data: Data array of type either :class:`numpy.ndarray` or
:class:`cupy.ndarray`.
grad: Gradient array. It is ``None`` until backprop reaches this
variable.
creator: The function who creates this variable. It is ``None`` if the
variable is not created by any function.
volatile: Boolean flag. If True, the variable does not keep track of
any function applications.
"""
def __init__(self, data, volatile=False):
"""Initializes a variable.
Args:
data (:class:`numpy.ndarray` or :class:`cupy.ndarray`):
Data array that this variable holds.
volatile (bool): Volatility flag. If it is True, the variable will
not keep track of any function applications.
"""
assert isinstance(data, (numpy.ndarray, cuda.ndarray))
assert isinstance(volatile, bool)
self.data = data
self.rank = 0
self.volatile = volatile
self.splitter = weakref.ref(lambda: 0) # dead ref
self._grad = None
self.creator = None
def __pos__(self):
return self
def __len__(self):
"""Returns the number of elements of the data array.
Returns:
int: the number of elements of the data array.
"""
return self.data.size
@property
def label(self):
"""Short text that represents the function."""
if self.data.shape == ():
return str(self.data.dtype)
return '%s, %s' % (str(self.data.shape),
str(self.data.dtype))
@property
def grad(self):
return self._grad
@grad.setter
def grad(self, g):
error_msg = '''
This error is occured in two cases. The first case is when the user manually
sets the Variable.grad incorrectly. The second case is when some Function
implementation has a bug. If you do not manually set the Variable.grad in your
script, please report this error to the issue tracker with the stack trace,
the information of your environment, and your script:
https://github.com/pfnet/chainer/issues/new.
'''
if g is not None:
if not isinstance(g, type(self.data)):
raise TypeError('Type of data and grad mismatch: %s != %s%s'
% (type(self.data), type(g), error_msg))
if g.dtype != self.data.dtype:
raise TypeError('Dtype of data and grad mismatch: %s != %s%s'
% (self.data.dtype, g.dtype, error_msg))
if g.shape != self.data.shape:
raise ValueError('Shape of data and grad mismatch: %s != %s%s'
% (self.data.shape, g.shape, error_msg))
self._grad = g
def set_creator(self, gen_func):
"""Notifies the variable that the given function is its creator.
Args:
gen_func (Function): Function object that creates this variable as
one of its outputs.
"""
self.creator = gen_func
self.rank = gen_func.rank + 1
def backward(self, retain_grad=False):
"""Runs error backpropagation (a.k.a. backprop) from this variable.
On backprop, :meth:`Function.backward` is called on each
:class:`Function` object appearing in the backward graph starting from
this variable. The backward graph is represented by backward references
from variables to their creators, and from functions to their inputs.
The backprop stops at all root variables. Some functions set ``None``
as gradients of some inputs, where further backprop does not take place
at such input variables.
This method uses :data:`grad` as the initial error array. User can
manually set a gradient array before calling this method. If
:data:`data` contains only one element (i.e., it is scalar) and
:data:`grad` is None, then this method automatically complements 1.0 as
the initial error. This is useful on starting backprop from some scalar
loss value.
Args:
retain_grad (bool): If True, the gradient arrays of all
intermediate variables are kept. Otherwise, :data:`grad` of the
intermediate variables are set to ``None`` on appropriate
timing, which may reduce the maximum memory consumption.
In most cases of training some model, the purpose of backprop
is to compute gradients of parameters, not of variables, so it
is recommended to set this flag False.
"""
if self.creator is None:
return
cand_funcs = []
seen_set = set()
# Initilize error by 1, if this is a loss variable
if self.data.size == 1 and self.grad is None:
with cuda.get_device(self.data) as device:
if device is cuda.DummyDevice:
self.grad = numpy.ones_like(self.data)
else:
self.grad = cuda.cupy.ones_like(self.data)
def add_cand(cand):
if cand is not None and cand not in seen_set:
# Negate since heapq is min-heap
heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand))
seen_set.add(cand)
add_cand(self.creator)
while cand_funcs:
_, _, func = heapq.heappop(cand_funcs)
outputs = tuple(y() for y in func.outputs) # access via weak ref
in_data = tuple(x.data for x in func.inputs)
out_grad = tuple(None if y is None else y.grad for y in outputs)
with cuda.get_device(*(in_data + out_grad)):
gxs = func.backward(in_data, out_grad)
assert len(gxs) == len(in_data)
if not retain_grad:
for y in outputs:
if y is not None and y is not self:
y.grad = None
for x, gx in zip(func.inputs, gxs):
x.grad = gx
if gx is not None: # skip if gradient does not flow
add_cand(x.creator)
def unchain_backward(self):
"""Deletes references between variables and functions backward.
After this method completes, intermediate variables and functions that
are not referenced from anywhere are deallocated by reference
count GC. Also this variable itself deletes the reference to its
creator function, i.e. this variable becomes root in the computation
graph. It indicates that backprop after unchaining stops at this
variable. This behavior is useful to implement truncated BPTT.
"""
cand_funcs = []
seen_set = set()
def add_cand(cand):
if cand is not None and cand not in seen_set:
cand_funcs.append(cand)
seen_set.add(cand)
add_cand(self.creator)
while cand_funcs:
func = cand_funcs.pop()
for var in func.inputs:
add_cand(var.creator)
func.unchain()
    def __lt__(self, other):
        """Ordering is intentionally unsupported; always raises."""
        raise NotImplementedError()
    def __le__(self, other):
        """Ordering is intentionally unsupported; always raises."""
        raise NotImplementedError()
    def __eq__(self, other):
        """Equality comparison is intentionally unsupported; always raises."""
        raise NotImplementedError()
    def __ne__(self, other):
        """Inequality comparison is intentionally unsupported; always raises."""
        raise NotImplementedError()
    def __gt__(self, other):
        """Ordering is intentionally unsupported; always raises."""
        raise NotImplementedError()
    def __ge__(self, other):
        """Ordering is intentionally unsupported; always raises."""
        raise NotImplementedError()
    def __nonzero__(self):
        """Truth-value testing is unsupported (Python 2 hook); always raises."""
        raise NotImplementedError()
    def __bool__(self):
        """Truth-value testing is unsupported (Python 3 hook); always raises."""
        raise NotImplementedError()
    def __hash__(self):
        """Hashes by delegating to the base class (identity-based for
        plain objects), since ``__eq__`` is overridden above."""
        return super(Variable, self).__hash__()
    # NumPy dispatch hint: a high __array_priority__ makes NumPy prefer
    # this class's reflected operators over ndarray's in mixed binary ops.
    __array_priority__ = 200
|
from __future__ import with_statement
import datetime
import fiscalyear
import pytest
# Fiscal calendars to test.
# Each tuple is (start_year, start_month, start_day), matching the
# positional parameters of fiscalyear.fiscal_calendar(), and is applied
# as fiscal_calendar(*US_FEDERAL) / fiscal_calendar(*UK_PERSONAL).
US_FEDERAL = ("previous", 10, 1)
UK_PERSONAL = ("same", 4, 6)
class TestCheckInt(object):
    """Tests for the private ``fiscalyear._check_int`` validator:
    non-integer input raises, valid int/numeric-string input is
    returned as an int."""
    @pytest.mark.parametrize(
        "value, exception",
        [
            ("asdf", TypeError),
            ("-999", TypeError),
            # Technically speaking, _check_int should accept negative integers
            # but this isn't a public function + datetime doesn't handle them
            # anyway.
            (float(), TypeError),
            (object(), TypeError),
        ],
    )
    def test_invalid_input(self, value, exception):
        with pytest.raises(exception):
            fiscalyear._check_int(value)
    @pytest.mark.parametrize("value", [1, 2, 0, -1, -2, "1", "0", "999"])
    def test_valid_input(self, value):
        assert int(value) == fiscalyear._check_int(value)
class TestCheckYear(object):
    """Tests for the private ``fiscalyear._check_year`` validator:
    wrong types raise TypeError, out-of-range years (<1 or >9999)
    raise ValueError."""
    @pytest.mark.parametrize(
        "value, exception",
        [
            ("asdf", TypeError),
            (float(), TypeError),
            (object(), TypeError),
            ("-1", TypeError),
            (-1, ValueError),
            (0, ValueError),
            ("0", ValueError),
            (10000, ValueError),
            ("10000", ValueError),
        ],
    )
    def test_invalid_input(self, value, exception):
        with pytest.raises(exception):
            fiscalyear._check_year(value)
    @pytest.mark.parametrize("value", [1, 2, "1", "999"])
    def test_valid_input(self, value):
        assert int(value) == fiscalyear._check_year(value)
class TestCheckDay(object):
    """Tests for the private ``fiscalyear._check_day`` validator:
    wrong types raise TypeError, days outside the month's valid range
    raise ValueError, and valid int/numeric-string days are returned
    as ints."""
    @pytest.mark.parametrize(
        "month, day, exception",
        [
            (1, "asdf", TypeError),
            (1, "-999", TypeError),
            (1, float(), TypeError),
            (1, object(), TypeError),
            (1, -1, ValueError),
            (1, "-1", TypeError),
            (1, 0, ValueError),
            (1, "0", ValueError),
            (1, 32, ValueError),
            # Previously a duplicate of the case above; cover the string
            # form instead, mirroring (1, "0", ValueError).
            (1, "32", ValueError),
        ],
    )
    def test_invalid_input(self, month, day, exception):
        with pytest.raises(exception):
            fiscalyear._check_day(month, day)
    @pytest.mark.parametrize(
        "month, day", [(1, 1), (1, 2), (1, "1"), (1, 31), (1, "31")]
    )
    def test_valid_input(self, month, day):
        assert int(day) == fiscalyear._check_day(month, day)
class TestCheckQuarter(object):
    """Tests for the private ``fiscalyear._check_quarter`` validator:
    wrong types raise TypeError, quarters outside 1-4 raise
    ValueError."""
    @pytest.mark.parametrize(
        "value, exception",
        [
            ("asdf", TypeError),
            (float(), TypeError),
            (object(), TypeError),
            ("-1", TypeError),
            (-1, ValueError),
            (0, ValueError),
            ("0", ValueError),
            (5, ValueError),
            ("5", ValueError),
        ],
    )
    def test_invalid_input(self, value, exception):
        with pytest.raises(exception):
            fiscalyear._check_quarter(value)
    @pytest.mark.parametrize("value", [1, 2, "1", "4"])
    def test_valid_input(self, value):
        assert int(value) == fiscalyear._check_quarter(value)
class TestValidateFiscalCalendarParams(object):
    """Tests for ``fiscalyear._validate_fiscal_calendar_params``:
    start_year must be "same" or "previous", start_month 1-12, and
    start_day within the month's valid range."""
    @pytest.mark.parametrize(
        "arguments, exception",
        [
            (dict(start_year="asdf", start_month=12, start_day=1), ValueError),
            (dict(start_year=float(1999), start_month=12, start_day=1), TypeError),
            (dict(start_year=object(), start_month=12, start_day=1), TypeError),
            (dict(start_year="same", start_month="asdf", start_day=1), TypeError),
            (dict(start_year="same", start_month=float(12), start_day=1), TypeError),
            (dict(start_year="same", start_month=object(), start_day=1), TypeError),
            (dict(start_year="same", start_month=-1, start_day=1), ValueError),
            (dict(start_year="same", start_month=0, start_day=1), ValueError),
            (dict(start_year="same", start_month=13, start_day=1), ValueError),
            (dict(start_year="same", start_month=12, start_day="asdf"), TypeError),
            (dict(start_year="same", start_month=12, start_day=float(1)), TypeError),
            (dict(start_year="same", start_month=12, start_day=object()), TypeError),
            (dict(start_year="same", start_month=12, start_day=0), ValueError),
            (dict(start_year="same", start_month=12, start_day=-1), ValueError),
            (dict(start_year="same", start_month=12, start_day=32), ValueError),
        ],
    )
    def test_invalid_input(self, arguments, exception):
        with pytest.raises(exception):
            fiscalyear._validate_fiscal_calendar_params(**arguments)
    @pytest.mark.parametrize(
        "arguments",
        [
            dict(start_year="same", start_month=1, start_day=1),
            dict(start_year="same", start_month=1, start_day=31),
            dict(start_year="same", start_month=12, start_day=1),
            dict(start_year="previous", start_month=1, start_day=1),
            dict(start_year="previous", start_month=1, start_day=31),
            dict(start_year="previous", start_month=12, start_day=1),
        ],
    )
    def test_valid_input(self, arguments):
        fiscalyear._validate_fiscal_calendar_params(**arguments)
class TestSetupFiscalCalendar(object):
    """Tests for ``fiscalyear.setup_fiscal_calendar``, which mutates the
    module-global START_YEAR / START_MONTH / START_DAY settings.

    Each test restores the default US federal calendar
    ("previous", 10, 1) before returning, since the globals are shared
    across the whole test session.
    """
    def test_start_year(self):
        assert fiscalyear.START_YEAR == "previous"
        fiscalyear.setup_fiscal_calendar(start_year="same")
        assert fiscalyear.START_YEAR == "same"
        fiscalyear.setup_fiscal_calendar(start_year="previous")
        assert fiscalyear.START_YEAR == "previous"
    def test_start_month(self):
        assert fiscalyear.START_MONTH == 10
        fiscalyear.setup_fiscal_calendar(start_month=4)
        assert fiscalyear.START_MONTH == 4
        fiscalyear.setup_fiscal_calendar(start_month=10)
        assert fiscalyear.START_MONTH == 10
    def test_start_day(self):
        assert fiscalyear.START_DAY == 1
        fiscalyear.setup_fiscal_calendar(start_day=6)
        assert fiscalyear.START_DAY == 6
        fiscalyear.setup_fiscal_calendar(start_day=1)
        assert fiscalyear.START_DAY == 1
    def test_complex(self):
        # Test defaults
        day = fiscalyear.FiscalDate(2017, 12, 1)
        assert day.fiscal_year == 2018
        assert day.fiscal_quarter == 1
        # Change fiscal year settings
        fiscalyear.setup_fiscal_calendar("same", 1, 1)
        assert day.fiscal_year == 2017
        assert day.fiscal_quarter == 4
        # Restore defaults and re-test
        fiscalyear.setup_fiscal_calendar("previous", 10, 1)
        assert day.fiscal_year == 2018
        assert day.fiscal_quarter == 1
class TestFiscalCalendar:
    """Tests for the ``fiscalyear.fiscal_calendar`` context manager:
    settings apply inside the ``with`` block, nest correctly, are
    restored on exit, and invalid parameters raise on entry."""
    def test_start_year(self):
        assert fiscalyear.START_YEAR == "previous"
        with fiscalyear.fiscal_calendar(start_year="same"):
            assert fiscalyear.START_YEAR == "same"
        assert fiscalyear.START_YEAR == "previous"
    def test_start_month(self):
        assert fiscalyear.START_MONTH == 10
        with fiscalyear.fiscal_calendar(start_month=4):
            assert fiscalyear.START_MONTH == 4
        assert fiscalyear.START_MONTH == 10
    def test_start_day(self):
        assert fiscalyear.START_DAY == 1
        with fiscalyear.fiscal_calendar(start_day=6):
            assert fiscalyear.START_DAY == 6
        assert fiscalyear.START_DAY == 1
    def test_complex(self):
        assert fiscalyear.START_YEAR == "previous"
        assert fiscalyear.START_MONTH == 10
        assert fiscalyear.START_DAY == 1
        with fiscalyear.fiscal_calendar("same", 4, 6):
            assert fiscalyear.START_YEAR == "same"
            assert fiscalyear.START_MONTH == 4
            assert fiscalyear.START_DAY == 6
        assert fiscalyear.START_YEAR == "previous"
        assert fiscalyear.START_MONTH == 10
        assert fiscalyear.START_DAY == 1
    def test_nested(self):
        # Each nested level overrides one setting and must restore it on exit.
        assert fiscalyear.START_YEAR == "previous"
        assert fiscalyear.START_MONTH == 10
        assert fiscalyear.START_DAY == 1
        with fiscalyear.fiscal_calendar(start_year="same"):
            assert fiscalyear.START_YEAR == "same"
            assert fiscalyear.START_MONTH == 10
            assert fiscalyear.START_DAY == 1
            with fiscalyear.fiscal_calendar(start_month=4):
                assert fiscalyear.START_YEAR == "same"
                assert fiscalyear.START_MONTH == 4
                assert fiscalyear.START_DAY == 1
                with fiscalyear.fiscal_calendar(start_day=6):
                    assert fiscalyear.START_YEAR == "same"
                    assert fiscalyear.START_MONTH == 4
                    assert fiscalyear.START_DAY == 6
                assert fiscalyear.START_YEAR == "same"
                assert fiscalyear.START_MONTH == 4
                assert fiscalyear.START_DAY == 1
            assert fiscalyear.START_YEAR == "same"
            assert fiscalyear.START_MONTH == 10
            assert fiscalyear.START_DAY == 1
        assert fiscalyear.START_YEAR == "previous"
        assert fiscalyear.START_MONTH == 10
        assert fiscalyear.START_DAY == 1
    def test_wrong_type(self):
        with pytest.raises(TypeError):
            with fiscalyear.fiscal_calendar(start_year=6.5):
                pass
        with pytest.raises(TypeError):
            with fiscalyear.fiscal_calendar(start_month=6.5):
                pass
        with pytest.raises(TypeError):
            with fiscalyear.fiscal_calendar(start_day="hello world"):
                pass
    def test_out_of_range(self):
        with pytest.raises(ValueError):
            with fiscalyear.fiscal_calendar(start_month=0):
                pass
        with pytest.raises(ValueError):
            with fiscalyear.fiscal_calendar(start_month=2, start_day=29):
                pass
    def test_corner_cases(self):
        # start_day does not exist in all months
        with fiscalyear.fiscal_calendar(start_month=5, start_day=31):
            # Non-leap year
            assert fiscalyear.FiscalQuarter(2019, 1).start.day == 31
            assert fiscalyear.FiscalQuarter(2019, 1).end.day == 30
            assert fiscalyear.FiscalQuarter(2019, 2).start.day == 31
            assert fiscalyear.FiscalQuarter(2019, 2).end.day == 29
            assert fiscalyear.FiscalQuarter(2019, 3).start.day == 30
            assert fiscalyear.FiscalQuarter(2019, 3).end.day == 27
            assert fiscalyear.FiscalQuarter(2019, 4).start.day == 28
            assert fiscalyear.FiscalQuarter(2019, 4).end.day == 30
            # Leap year
            assert fiscalyear.FiscalQuarter(2020, 1).start.day == 31
            assert fiscalyear.FiscalQuarter(2020, 1).end.day == 30
            assert fiscalyear.FiscalQuarter(2020, 2).start.day == 31
            assert fiscalyear.FiscalQuarter(2020, 2).end.day == 29
            assert fiscalyear.FiscalQuarter(2020, 3).start.day == 30
            assert fiscalyear.FiscalQuarter(2020, 3).end.day == 28
            assert fiscalyear.FiscalQuarter(2020, 4).start.day == 29
            assert fiscalyear.FiscalQuarter(2020, 4).end.day == 30
class TestFiscalYear:
    """Tests for :class:`fiscalyear.FiscalYear`: construction and
    validation, navigation to adjacent years, start/end datetimes under
    the US federal and UK personal calendars, quarter accessors, leap
    detection, containment, and comparison operators."""
    @pytest.fixture(scope="class")
    def a(self):
        return fiscalyear.FiscalYear(2016)
    @pytest.fixture(scope="class")
    def b(self):
        return fiscalyear.FiscalYear(2017)
    @pytest.fixture(scope="class")
    def c(self):
        return fiscalyear.FiscalYear("2017")
    @pytest.fixture(scope="class")
    def d(self):
        return fiscalyear.FiscalQuarter(2017, 2)
    @pytest.fixture(scope="class")
    def e(self):
        return fiscalyear.FiscalMonth(2017, 1)
    @pytest.fixture(scope="class")
    def f(self):
        # NOTE(review): three-argument FiscalDay call, unlike the
        # two-argument form used elsewhere; this fixture is never
        # requested by any test in this class, so it never executes —
        # verify the signature before using it.
        return fiscalyear.FiscalDay(2017, 1, 15)
    @pytest.fixture(scope="class")
    def g(self):
        return fiscalyear.FiscalYear(2015)
    def test_basic(self, a):
        assert a.fiscal_year == 2016
    def test_current(self, mocker):
        mock_today = mocker.patch.object(fiscalyear.FiscalDate, "today")
        mock_today.return_value = fiscalyear.FiscalDate(2016, 10, 1)
        current = fiscalyear.FiscalYear.current()
        assert current == fiscalyear.FiscalYear(2017)
    def test_repr(self, a):
        assert repr(a) == "FiscalYear(2016)"
    def test_str(self, a):
        assert str(a) == "FY2016"
    def test_from_string(self, c):
        assert c.fiscal_year == 2017
    def test_wrong_type(self):
        with pytest.raises(TypeError):
            fiscalyear.FiscalYear(2017.5)
        with pytest.raises(TypeError):
            fiscalyear.FiscalYear("hello world")
    def test_out_of_range(self):
        with pytest.raises(ValueError):
            fiscalyear.FiscalYear(0)
        with pytest.raises(ValueError):
            fiscalyear.FiscalYear(-2017)
    def test_prev_fiscal_year(self, a, b):
        assert a == b.prev_fiscal_year
    def test_next_fiscal_year(self, a, b):
        assert a.next_fiscal_year == b
    def test_start(self, a):
        assert a.start == a.q1.start
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert a.start == datetime.datetime(2015, 10, 1, 0, 0, 0)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert a.start == datetime.datetime(2016, 4, 6, 0, 0, 0)
    def test_end(self, a):
        assert a.end == a.q4.end
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert a.end == datetime.datetime(2016, 9, 30, 23, 59, 59)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert a.end == datetime.datetime(2017, 4, 5, 23, 59, 59)
    def test_q1(self, a):
        assert a.q1 == fiscalyear.FiscalQuarter(2016, 1)
    def test_q2(self, a):
        assert a.q2 == fiscalyear.FiscalQuarter(2016, 2)
    def test_q3(self, a):
        assert a.q3 == fiscalyear.FiscalQuarter(2016, 3)
    def test_q4(self, a):
        assert a.q4 == fiscalyear.FiscalQuarter(2016, 4)
    def test_is_leap(self, a, b, g):
        # default US start_year='previous', start_month=10
        assert isinstance(a.isleap, bool)
        assert isinstance(g.isleap, bool)
        with fiscalyear.fiscal_calendar(start_year="previous", start_month=1):
            assert not a.isleap
            assert b.isleap
        with fiscalyear.fiscal_calendar(start_year="same", start_month=3):
            assert not a.isleap
            assert g.isleap
        with fiscalyear.fiscal_calendar(start_year="same", start_month=1):
            assert a.isleap
            assert not g.isleap
    def test_contains(self, a, b, c, d, e):
        assert b in c
        assert d not in a
        assert d in b
        assert e in b
        assert fiscalyear.FiscalDateTime(2016, 1, 1, 0, 0, 0) in a
        assert datetime.datetime(2016, 1, 1, 0, 0, 0) in a
        assert fiscalyear.FiscalDate(2016, 1, 1) in a
        assert datetime.date(2016, 1, 1) in a
        with pytest.raises(TypeError):
            "hello world" in a
    def test_less_than(self, a, b):
        assert a < b
        with pytest.raises(TypeError):
            a < 1
    def test_less_than_equals(self, a, b, c):
        assert a <= b <= c
        with pytest.raises(TypeError):
            a <= 1
    def test_equals(self, b, c):
        assert b == c
        with pytest.raises(TypeError):
            b == 1
    def test_not_equals(self, a, b):
        assert a != b
        with pytest.raises(TypeError):
            a != 1
    def test_greater_than(self, a, b):
        assert b > a
        with pytest.raises(TypeError):
            a > 1
    def test_greater_than_equals(self, a, b, c):
        assert c >= b >= a
        with pytest.raises(TypeError):
            a >= 1
class TestFiscalQuarter:
    """Tests for :class:`fiscalyear.FiscalQuarter`: construction and
    validation, quarter navigation, per-quarter start/end datetimes
    under the US federal and UK personal calendars, deprecated aliases,
    containment, and comparison operators."""
    @pytest.fixture(scope="class")
    def a(self):
        return fiscalyear.FiscalQuarter(2016, 4)
    @pytest.fixture(scope="class")
    def b(self):
        return fiscalyear.FiscalQuarter(2017, 1)
    @pytest.fixture(scope="class")
    def c(self):
        return fiscalyear.FiscalQuarter(2017, 2)
    @pytest.fixture(scope="class")
    def d(self):
        return fiscalyear.FiscalQuarter(2017, 3)
    @pytest.fixture(scope="class")
    def e(self):
        return fiscalyear.FiscalQuarter(2017, 4)
    @pytest.fixture(scope="class")
    def f(self):
        return fiscalyear.FiscalQuarter(2018, 1)
    @pytest.fixture(scope="class")
    def g(self):
        return fiscalyear.FiscalQuarter("2018", "1")
    def test_basic(self, a):
        assert a.fiscal_year == 2016
        assert a.fiscal_quarter == 4
    def test_current(self, mocker):
        mock_today = mocker.patch.object(fiscalyear.FiscalDate, "today")
        mock_today.return_value = fiscalyear.FiscalDate(2016, 10, 1)
        current = fiscalyear.FiscalQuarter.current()
        assert current == fiscalyear.FiscalQuarter(2017, 1)
    def test_repr(self, a):
        assert repr(a) == "FiscalQuarter(2016, 4)"
    def test_str(self, a):
        assert str(a) == "FY2016 Q4"
    def test_from_string(self, g):
        assert g.fiscal_year == 2018
        assert g.fiscal_quarter == 1
    def test_wrong_type(self):
        with pytest.raises(TypeError):
            fiscalyear.FiscalQuarter(2017.5, 1.2)
        with pytest.raises(TypeError):
            fiscalyear.FiscalQuarter("hello", "world")
    def test_out_of_range(self):
        with pytest.raises(ValueError):
            fiscalyear.FiscalQuarter(2017, 0)
        with pytest.raises(ValueError):
            fiscalyear.FiscalQuarter(2017, 5)
        with pytest.raises(ValueError):
            fiscalyear.FiscalQuarter(0, 2)
    def test_deprecated(self, a):
        with pytest.deprecated_call():
            a.quarter
            a.prev_quarter
            a.next_quarter
    def test_prev_fiscal_quarter(self, a, b, c, d, e, f):
        assert a == b.prev_fiscal_quarter
        assert b == c.prev_fiscal_quarter
        assert c == d.prev_fiscal_quarter
        assert d == e.prev_fiscal_quarter
        assert e == f.prev_fiscal_quarter
    def test_next_fiscal_quarter(self, a, b, c, d, e, f):
        assert a.next_fiscal_quarter == b
        assert b.next_fiscal_quarter == c
        assert c.next_fiscal_quarter == d
        assert d.next_fiscal_quarter == e
        assert e.next_fiscal_quarter == f
    def test_start(self, a):
        with fiscalyear.fiscal_calendar(start_month=3):
            assert a.start == datetime.datetime(2015, 12, 1, 0, 0)
    def test_end(self, a):
        with fiscalyear.fiscal_calendar(start_month=1, start_year="same"):
            assert a.end == datetime.datetime(2016, 12, 31, 23, 59, 59)
    def test_bad_start_year(self, a):
        # NOTE(review): the restore below is skipped if the assertion
        # fails; a try/finally (or pytest's monkeypatch) would be safer.
        backup_start_year = fiscalyear.START_YEAR
        fiscalyear.START_YEAR = "hello world"
        with pytest.raises(ValueError):
            a.start
        fiscalyear.START_YEAR = backup_start_year
    def test_q1_start(self, b):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert b.start == datetime.datetime(2016, 10, 1, 0, 0, 0)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert b.start == datetime.datetime(2017, 4, 6, 0, 0, 0)
    def test_q1_end(self, b):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert b.end == datetime.datetime(2016, 12, 31, 23, 59, 59)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert b.end == datetime.datetime(2017, 7, 5, 23, 59, 59)
    def test_q2_start(self, c):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert c.start == datetime.datetime(2017, 1, 1, 0, 0, 0)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert c.start == datetime.datetime(2017, 7, 6, 0, 0, 0)
    def test_q2_end(self, c):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert c.end == datetime.datetime(2017, 3, 31, 23, 59, 59)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert c.end == datetime.datetime(2017, 10, 5, 23, 59, 59)
    def test_q3_start(self, d):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert d.start == datetime.datetime(2017, 4, 1, 0, 0, 0)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert d.start == datetime.datetime(2017, 10, 6, 0, 0, 0)
    def test_q3_end(self, d):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert d.end == datetime.datetime(2017, 6, 30, 23, 59, 59)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert d.end == datetime.datetime(2018, 1, 5, 23, 59, 59)
    def test_q4_start(self, e):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert e.start == datetime.datetime(2017, 7, 1, 0, 0, 0)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert e.start == datetime.datetime(2018, 1, 6, 0, 0, 0)
    def test_q4_end(self, e):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert e.end == datetime.datetime(2017, 9, 30, 23, 59, 59)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert e.end == datetime.datetime(2018, 4, 5, 23, 59, 59)
    def test_contains(self, a, f, g):
        assert a not in f
        assert f in g
        assert fiscalyear.FiscalDateTime(2016, 8, 1, 0, 0, 0) in a
        assert datetime.datetime(2016, 8, 1, 0, 0, 0) in a
        assert fiscalyear.FiscalDate(2016, 8, 1) in a
        assert datetime.date(2016, 8, 1) in a
        with pytest.raises(TypeError):
            fiscalyear.FiscalYear(2016) in a
    def test_less_than(self, a, b, c, d, e, f):
        assert a < b < c < d < e < f
        with pytest.raises(TypeError):
            a < 1
    def test_less_than_equals(self, a, b, c, d, e, f, g):
        assert a <= b <= c <= d <= e <= f <= g
        with pytest.raises(TypeError):
            a <= 1
    def test_equals(self, f, g):
        assert f == g
        with pytest.raises(TypeError):
            f == 1
    def test_not_equals(self, b, c, g):
        # Same year, different quarter
        assert b != c
        # Same quarter, different year
        assert b != g
        with pytest.raises(TypeError):
            b != 1
    def test_greater_than(self, a, b, c, d, e, f):
        assert f > e > d > c > b > a
        with pytest.raises(TypeError):
            a > 1
    def test_greater_than_equals(self, a, b, c, d, e, f, g):
        assert g >= f >= e >= d >= c >= b >= a
        with pytest.raises(TypeError):
            a >= 1
class TestFiscalMonth:
    """Tests for :class:`fiscalyear.FiscalMonth`: construction and
    validation, month navigation (including year wrap-around),
    start/end datetimes, containment, and comparison operators."""
    @pytest.fixture(scope="class")
    def a(self):
        return fiscalyear.FiscalMonth(2016, 1)
    @pytest.fixture(scope="class")
    def b(self):
        return fiscalyear.FiscalMonth(2016, 2)
    @pytest.fixture(scope="class")
    def c(self):
        return fiscalyear.FiscalMonth("2016", "2")
    @pytest.fixture(scope="class")
    def d(self):
        return fiscalyear.FiscalQuarter(2016, 1)
    @pytest.fixture(scope="class")
    def e(self):
        return fiscalyear.FiscalMonth(2016, 12)
    @pytest.fixture(scope="class")
    def f(self):
        return fiscalyear.FiscalQuarter(2017, 1)
    def test_basic(self, a):
        assert a.fiscal_year == 2016
        assert a.fiscal_month == 1
    def test_current(self, mocker):
        mock_today = mocker.patch.object(fiscalyear.FiscalDate, "today")
        mock_today.return_value = fiscalyear.FiscalDate(2016, 10, 1)
        current = fiscalyear.FiscalMonth.current()
        assert current == fiscalyear.FiscalMonth(2017, 1)
    def test_repr(self, a):
        assert repr(a) == "FiscalMonth(2016, 1)"
    def test_str(self, a):
        assert str(a) == "FY2016 FM1"
    def test_from_string(self, c):
        assert c.fiscal_year == 2016
    def test_wrong_type(self):
        with pytest.raises(TypeError):
            fiscalyear.FiscalMonth(2016.5)
        with pytest.raises(TypeError):
            fiscalyear.FiscalMonth("hello world")
    def test_out_of_range(self):
        with pytest.raises(ValueError):
            fiscalyear.FiscalMonth(2016, 0)
        with pytest.raises(ValueError):
            fiscalyear.FiscalMonth(2016, -12)
    def test_prev_fiscal_year(self, a, b):
        assert a == b.prev_fiscal_month
        # FM1 wraps back into the previous fiscal year's FM12.
        assert a.prev_fiscal_month == fiscalyear.FiscalMonth(2015, 12)
    def test_next_fiscal_year(self, a, b):
        assert a.next_fiscal_month == b
    def test_start(self, a, e):
        assert a.start == fiscalyear.FiscalYear(a.fiscal_year).start
        assert e.start == fiscalyear.FiscalDateTime(2016, 9, 1, 0, 0, 0)
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert a.start == datetime.datetime(2015, 10, 1, 0, 0, 0)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert a.start == datetime.datetime(2016, 4, 6, 0, 0, 0)
            assert fiscalyear.FiscalMonth(2016, 12).start == datetime.datetime(
                2017, 3, 6, 0, 0, 0
            )
    def test_end(self, e):
        assert e.end == fiscalyear.FiscalYear(e.fiscal_year).end
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert e.end == datetime.datetime(2016, 9, 30, 23, 59, 59)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert e.end == datetime.datetime(2017, 4, 5, 23, 59, 59)
    def test_contains(self, a, b, c, d, f):
        assert b in c
        assert a not in f
        assert b in d
        assert fiscalyear.FiscalDateTime(2015, 10, 1, 0, 0, 0) in a
        assert datetime.datetime(2015, 10, 1, 0, 0, 0) in a
        assert fiscalyear.FiscalDate(2015, 10, 1) in a
        assert datetime.date(2015, 10, 1) in a
        with pytest.raises(TypeError):
            "hello world" in a
    def test_less_than(self, a, b):
        assert a < b
        with pytest.raises(TypeError):
            a < 1
    def test_less_than_equals(self, a, b, c):
        assert a <= b <= c
        with pytest.raises(TypeError):
            a <= 1
    def test_equals(self, b, c):
        assert b == c
        with pytest.raises(TypeError):
            b == 1
    def test_not_equals(self, a, b):
        assert a != b
        with pytest.raises(TypeError):
            a != 1
    def test_greater_than(self, a, b):
        assert b > a
        with pytest.raises(TypeError):
            a > 1
    def test_greater_than_equals(self, a, b, c):
        assert c >= b >= a
        with pytest.raises(TypeError):
            a >= 1
class TestFiscalDay:
    """Tests for :class:`fiscalyear.FiscalDay`: construction and
    validation, day navigation (including leap-year wrap-around),
    start/end datetimes, leap-year day numbering, containment, and
    comparison operators."""
    @pytest.fixture(scope="class")
    def a(self):
        return fiscalyear.FiscalDay(2016, 1)
    @pytest.fixture(scope="class")
    def b(self):
        return fiscalyear.FiscalDay(2016, 2)
    @pytest.fixture(scope="class")
    def c(self):
        return fiscalyear.FiscalDay("2016", "2")
    @pytest.fixture(scope="class")
    def e(self):
        # FY2016 has 366 days under the default calendar (leap year).
        return fiscalyear.FiscalDay(2016, 366)
    @pytest.fixture(scope="class")
    def f(self):
        return fiscalyear.FiscalDay(2017, 1)
    def test_basic(self, a):
        assert a.fiscal_year == 2016
        assert a.fiscal_day == 1
        assert a.fiscal_month == 1
        assert a.fiscal_quarter == 1
    def test_current(self, mocker):
        mock_today = mocker.patch.object(fiscalyear.FiscalDate, "today")
        mock_today.return_value = fiscalyear.FiscalDate(2016, 10, 1)
        current = fiscalyear.FiscalDay.current()
        assert current == fiscalyear.FiscalDay(2017, 1)
    def test_repr(self, a):
        assert repr(a) == "FiscalDay(2016, 1)"
    def test_str(self, a):
        assert str(a) == "FY2016 FD1"
    def test_from_string(self, c):
        assert c.fiscal_year == 2016
        assert c.fiscal_day == 2
    def test_wrong_type(self):
        with pytest.raises(TypeError):
            fiscalyear.FiscalDay(2016.5)
        with pytest.raises(TypeError):
            fiscalyear.FiscalDay("hello world")
    def test_out_of_range(self):
        with pytest.raises(ValueError):
            fiscalyear.FiscalDay(2016, 0)
        with pytest.raises(ValueError):
            fiscalyear.FiscalDay(2016, -364)
    def test_prev_fiscal_day(self, a, b, f):
        assert a == b.prev_fiscal_day
        assert a.prev_fiscal_day == fiscalyear.FiscalDay(2015, 365)
        assert f.prev_fiscal_day == fiscalyear.FiscalDay(2016, 366)
    def test_next_fiscal_day(self, a, b):
        assert a.next_fiscal_day == b
    def test_start(self, a, e):
        assert a.start == fiscalyear.FiscalYear(a.fiscal_year).start
        assert e.start == fiscalyear.FiscalDateTime(2016, 9, 30, 0, 0, 0)
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert a.start == datetime.datetime(2015, 10, 1, 0, 0, 0)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert a.start == datetime.datetime(2016, 4, 6, 0, 0, 0)
    def test_end(self, e):
        assert e.end == fiscalyear.FiscalYear(e.fiscal_year).end
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert e.end == datetime.datetime(2016, 9, 30, 23, 59, 59)
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert e.end == datetime.datetime(2017, 4, 5, 23, 59, 59)
    def test_leap_year(self):
        assert fiscalyear.FiscalDate(2016, 1, 1).fiscal_day == 93
        assert fiscalyear.FiscalDate(2016, 2, 29).fiscal_day == 152
        assert fiscalyear.FiscalDate(2017, 3, 1).fiscal_day == 152
        assert fiscalyear.FiscalDate(2016, 9, 30).fiscal_day == 366
        assert fiscalyear.FiscalDate(2017, 9, 30).fiscal_day == 365
        assert fiscalyear.FiscalDate(2018, 9, 30).fiscal_day == 365
    def test_contains(self, a, b, c, f):
        assert b in c
        assert a not in f
        assert fiscalyear.FiscalDateTime(2015, 10, 1, 0, 0, 0) in a
        assert datetime.datetime(2015, 10, 1, 0, 0, 0) in a
        assert fiscalyear.FiscalDate(2015, 10, 1) in a
        assert datetime.date(2015, 10, 1) in a
        assert b in fiscalyear.FiscalMonth(2016, 1)
        assert b in fiscalyear.FiscalQuarter(2016, 1)
        assert b in fiscalyear.FiscalYear(2016)
        with pytest.raises(TypeError):
            "hello world" in a
    def test_less_than(self, a, b):
        assert a < b
        with pytest.raises(TypeError):
            a < 1
    def test_less_than_equals(self, a, b, c):
        assert a <= b <= c
        with pytest.raises(TypeError):
            a <= 1
    def test_equals(self, b, c):
        assert b == c
        with pytest.raises(TypeError):
            b == 1
    def test_not_equals(self, a, b):
        assert a != b
        with pytest.raises(TypeError):
            a != 1
    def test_greater_than(self, a, b):
        assert b > a
        with pytest.raises(TypeError):
            a > 1
    def test_greater_than_equals(self, a, b, c):
        assert c >= b >= a
        with pytest.raises(TypeError):
            a >= 1
class TestFiscalDate:
    """Tests for :class:`fiscalyear.FiscalDate`: fiscal attributes of a
    calendar date under multiple calendars, navigation to adjacent
    fiscal periods, and deprecated aliases."""
    @pytest.fixture(scope="class")
    def a(self):
        return fiscalyear.FiscalDate(2017, 1, 1)
    @pytest.fixture(scope="class")
    def b(self):
        return fiscalyear.FiscalDate(2017, 8, 31)
    @pytest.fixture(scope="class")
    def c(self):
        return fiscalyear.FiscalDate(2017, 11, 15)
    def test_basic(self, a):
        assert a.year == 2017
        assert a.month == 1
        assert a.day == 1
        assert a.fiscal_year == 2017
        assert a.fiscal_month == 4
        assert a.fiscal_quarter == 2
    def test_fiscal_periods(self, a, c):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert a.fiscal_year == 2017
            assert a.fiscal_month == 4
            assert c.fiscal_year == 2018
            assert c.fiscal_month == 2
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert a.fiscal_year == 2016
            assert a.fiscal_month == 9
            assert c.fiscal_year == 2017
            assert c.fiscal_month == 8
    def test_prev_fiscal_year(self, a):
        assert a.prev_fiscal_year == fiscalyear.FiscalYear(2016)
    def test_next_fiscal_year(self, a):
        assert a.next_fiscal_year == fiscalyear.FiscalYear(2018)
    def test_prev_fiscal_quarter(self, a, c):
        assert a.prev_fiscal_quarter == fiscalyear.FiscalQuarter(2017, 1)
        assert c.prev_fiscal_quarter == fiscalyear.FiscalQuarter(2017, 4)
    def test_next_fiscal_quarter(self, a, c):
        assert a.next_fiscal_quarter == fiscalyear.FiscalQuarter(2017, 3)
        assert c.next_fiscal_quarter == fiscalyear.FiscalQuarter(2018, 2)
    def test_prev_fiscal_month(self, a):
        assert a.prev_fiscal_month == fiscalyear.FiscalMonth(2017, 3)
    def test_next_fiscal_month(self, a):
        assert a.next_fiscal_month == fiscalyear.FiscalMonth(2017, 5)
    def test_prev_fiscal_day(self, a):
        assert a.prev_fiscal_day == fiscalyear.FiscalDay(2017, 92)
    def test_next_fiscal_day(self, a):
        assert a.next_fiscal_day == fiscalyear.FiscalDay(2017, 94)
    def test_deprecated(self, a):
        with pytest.deprecated_call():
            a.quarter
            a.prev_quarter
            a.next_quarter
class TestFiscalDateTime:
    """Tests for :class:`fiscalyear.FiscalDateTime`: mirrors the
    FiscalDate tests for the datetime variant (fiscal attributes,
    period navigation, deprecated aliases)."""
    @pytest.fixture(scope="class")
    def a(self):
        return fiscalyear.FiscalDateTime(2017, 1, 1, 0, 0, 0)
    @pytest.fixture(scope="class")
    def b(self):
        return fiscalyear.FiscalDateTime(2017, 8, 31, 23, 59, 59)
    @pytest.fixture(scope="class")
    def c(self):
        return fiscalyear.FiscalDateTime(2017, 11, 15, 12, 4, 30)
    def test_basic(self, a):
        assert a.year == 2017
        assert a.month == 1
        assert a.day == 1
        assert a.hour == 0
        assert a.minute == 0
        assert a.second == 0
        assert a.fiscal_year == 2017
        assert a.fiscal_quarter == 2
    def test_fiscal_periods(self, a, c):
        with fiscalyear.fiscal_calendar(*US_FEDERAL):
            assert a.fiscal_year == 2017
            assert a.fiscal_month == 4
            assert c.fiscal_year == 2018
            assert c.fiscal_month == 2
        with fiscalyear.fiscal_calendar(*UK_PERSONAL):
            assert a.fiscal_year == 2016
            assert a.fiscal_month == 9
            assert c.fiscal_year == 2017
            assert c.fiscal_month == 8
    def test_prev_fiscal_year(self, a):
        assert a.prev_fiscal_year == fiscalyear.FiscalYear(2016)
    def test_next_fiscal_year(self, a):
        assert a.next_fiscal_year == fiscalyear.FiscalYear(2018)
    def test_prev_fiscal_quarter(self, a, c):
        assert a.prev_fiscal_quarter == fiscalyear.FiscalQuarter(2017, 1)
        assert c.prev_fiscal_quarter == fiscalyear.FiscalQuarter(2017, 4)
    def test_next_fiscal_quarter(self, a, c):
        assert a.next_fiscal_quarter == fiscalyear.FiscalQuarter(2017, 3)
        assert c.next_fiscal_quarter == fiscalyear.FiscalQuarter(2018, 2)
    def test_prev_fiscal_month(self, a):
        assert a.prev_fiscal_month == fiscalyear.FiscalMonth(2017, 3)
    def test_next_fiscal_month(self, a):
        assert a.next_fiscal_month == fiscalyear.FiscalMonth(2017, 5)
    def test_prev_fiscal_day(self, a):
        assert a.prev_fiscal_day == fiscalyear.FiscalDay(2017, 92)
    def test_next_fiscal_day(self, a):
        assert a.next_fiscal_day == fiscalyear.FiscalDay(2017, 94)
    def test_deprecated(self, a):
        with pytest.deprecated_call():
            a.quarter
            a.prev_quarter
            a.next_quarter
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_plugin_v1.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='list_plugin_v1.proto',
package='plugin',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x14list_plugin_v1.proto\x12\x06plugin\"5\n\x13ListPluginV1Request\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x10\n\x08pageSize\x18\x02 \x01(\x05\"\x8f\x04\n\x14ListPluginV1Response\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\r\n\x05total\x18\x03 \x01(\x05\x12/\n\x04list\x18\x04 \x03(\x0b\x32!.plugin.ListPluginV1Response.List\x1a\x95\x03\n\x04List\x12H\n\x0elastestVersion\x18\x01 \x01(\x0b\x32\x30.plugin.ListPluginV1Response.List.LastestVersion\x12\x15\n\rdeployedCount\x18\x02 \x01(\x05\x12\n\n\x02id\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x12\n\ndeployPath\x18\x05 \x03(\t\x12\x0c\n\x04memo\x18\x06 \x01(\t\x12\x15\n\rrepoPackageId\x18\x07 \x01(\t\x12\x0f\n\x07\x63reator\x18\x08 \x01(\t\x12\r\n\x05\x63time\x18\t \x01(\x05\x12\r\n\x05mtime\x18\n \x01(\x05\x12\x10\n\x08isLocked\x18\x0b \x01(\x08\x1a\x97\x01\n\x0eLastestVersion\x12\n\n\x02id\x18\x01 \x01(\t\x12\x15\n\rrepoVersionId\x18\x02 \x01(\t\x12\x13\n\x0bversionName\x18\x03 \x01(\t\x12\x0c\n\x04memo\x18\x04 \x01(\t\x12\x10\n\x08pluginId\x18\x05 \x01(\t\x12\x0f\n\x07\x63reator\x18\x06 \x01(\t\x12\r\n\x05\x63time\x18\x07 \x01(\x05\x12\r\n\x05mtime\x18\x08 \x01(\x05\"{\n\x1bListPluginV1ResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12*\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x1c.plugin.ListPluginV1Responseb\x06proto3')
)
_LISTPLUGINV1REQUEST = _descriptor.Descriptor(
name='ListPluginV1Request',
full_name='plugin.ListPluginV1Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='plugin.ListPluginV1Request.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pageSize', full_name='plugin.ListPluginV1Request.pageSize', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=85,
)
_LISTPLUGINV1RESPONSE_LIST_LASTESTVERSION = _descriptor.Descriptor(
name='LastestVersion',
full_name='plugin.ListPluginV1Response.List.LastestVersion',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='plugin.ListPluginV1Response.List.LastestVersion.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='repoVersionId', full_name='plugin.ListPluginV1Response.List.LastestVersion.repoVersionId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionName', full_name='plugin.ListPluginV1Response.List.LastestVersion.versionName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='plugin.ListPluginV1Response.List.LastestVersion.memo', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pluginId', full_name='plugin.ListPluginV1Response.List.LastestVersion.pluginId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='plugin.ListPluginV1Response.List.LastestVersion.creator', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='plugin.ListPluginV1Response.List.LastestVersion.ctime', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mtime', full_name='plugin.ListPluginV1Response.List.LastestVersion.mtime', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=464,
serialized_end=615,
)
_LISTPLUGINV1RESPONSE_LIST = _descriptor.Descriptor(
name='List',
full_name='plugin.ListPluginV1Response.List',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lastestVersion', full_name='plugin.ListPluginV1Response.List.lastestVersion', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deployedCount', full_name='plugin.ListPluginV1Response.List.deployedCount', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='plugin.ListPluginV1Response.List.id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='plugin.ListPluginV1Response.List.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deployPath', full_name='plugin.ListPluginV1Response.List.deployPath', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='plugin.ListPluginV1Response.List.memo', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='repoPackageId', full_name='plugin.ListPluginV1Response.List.repoPackageId', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='plugin.ListPluginV1Response.List.creator', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='plugin.ListPluginV1Response.List.ctime', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mtime', full_name='plugin.ListPluginV1Response.List.mtime', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isLocked', full_name='plugin.ListPluginV1Response.List.isLocked', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LISTPLUGINV1RESPONSE_LIST_LASTESTVERSION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=210,
serialized_end=615,
)
_LISTPLUGINV1RESPONSE = _descriptor.Descriptor(
name='ListPluginV1Response',
full_name='plugin.ListPluginV1Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='plugin.ListPluginV1Response.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='plugin.ListPluginV1Response.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total', full_name='plugin.ListPluginV1Response.total', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='plugin.ListPluginV1Response.list', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LISTPLUGINV1RESPONSE_LIST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=615,
)
_LISTPLUGINV1RESPONSEWRAPPER = _descriptor.Descriptor(
name='ListPluginV1ResponseWrapper',
full_name='plugin.ListPluginV1ResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='plugin.ListPluginV1ResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='plugin.ListPluginV1ResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='plugin.ListPluginV1ResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='plugin.ListPluginV1ResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=617,
serialized_end=740,
)
_LISTPLUGINV1RESPONSE_LIST_LASTESTVERSION.containing_type = _LISTPLUGINV1RESPONSE_LIST
_LISTPLUGINV1RESPONSE_LIST.fields_by_name['lastestVersion'].message_type = _LISTPLUGINV1RESPONSE_LIST_LASTESTVERSION
_LISTPLUGINV1RESPONSE_LIST.containing_type = _LISTPLUGINV1RESPONSE
_LISTPLUGINV1RESPONSE.fields_by_name['list'].message_type = _LISTPLUGINV1RESPONSE_LIST
_LISTPLUGINV1RESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTPLUGINV1RESPONSE
DESCRIPTOR.message_types_by_name['ListPluginV1Request'] = _LISTPLUGINV1REQUEST
DESCRIPTOR.message_types_by_name['ListPluginV1Response'] = _LISTPLUGINV1RESPONSE
DESCRIPTOR.message_types_by_name['ListPluginV1ResponseWrapper'] = _LISTPLUGINV1RESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListPluginV1Request = _reflection.GeneratedProtocolMessageType('ListPluginV1Request', (_message.Message,), {
'DESCRIPTOR' : _LISTPLUGINV1REQUEST,
'__module__' : 'list_plugin_v1_pb2'
# @@protoc_insertion_point(class_scope:plugin.ListPluginV1Request)
})
_sym_db.RegisterMessage(ListPluginV1Request)
ListPluginV1Response = _reflection.GeneratedProtocolMessageType('ListPluginV1Response', (_message.Message,), {
'List' : _reflection.GeneratedProtocolMessageType('List', (_message.Message,), {
'LastestVersion' : _reflection.GeneratedProtocolMessageType('LastestVersion', (_message.Message,), {
'DESCRIPTOR' : _LISTPLUGINV1RESPONSE_LIST_LASTESTVERSION,
'__module__' : 'list_plugin_v1_pb2'
# @@protoc_insertion_point(class_scope:plugin.ListPluginV1Response.List.LastestVersion)
})
,
'DESCRIPTOR' : _LISTPLUGINV1RESPONSE_LIST,
'__module__' : 'list_plugin_v1_pb2'
# @@protoc_insertion_point(class_scope:plugin.ListPluginV1Response.List)
})
,
'DESCRIPTOR' : _LISTPLUGINV1RESPONSE,
'__module__' : 'list_plugin_v1_pb2'
# @@protoc_insertion_point(class_scope:plugin.ListPluginV1Response)
})
_sym_db.RegisterMessage(ListPluginV1Response)
_sym_db.RegisterMessage(ListPluginV1Response.List)
_sym_db.RegisterMessage(ListPluginV1Response.List.LastestVersion)
ListPluginV1ResponseWrapper = _reflection.GeneratedProtocolMessageType('ListPluginV1ResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _LISTPLUGINV1RESPONSEWRAPPER,
'__module__' : 'list_plugin_v1_pb2'
# @@protoc_insertion_point(class_scope:plugin.ListPluginV1ResponseWrapper)
})
_sym_db.RegisterMessage(ListPluginV1ResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
import cv2
import argparse
import os

# Interactive image triage: show each image in a folder and sort it into
# "happy/" or "nothappy/" based on a key press ('a' = happy, 'l' = not happy).
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--folder", required=True,
                    help="path to folder containing images")
args = parser.parse_args()

# Only consider common image extensions, case-insensitively.
extensions = ('.png', '.jpg', '.jpeg')
filenames = [file for file in os.listdir(args.folder)
             if file.lower().endswith(extensions)]

# Make sure the destination folders exist before writing into them.
os.makedirs("happy", exist_ok=True)
os.makedirs("nothappy", exist_ok=True)

for filename in filenames:
    print("processing:", filename)
    # BUG FIX: the read/write paths previously used a corrupted literal
    # placeholder instead of the current file name, so every iteration hit
    # the same bogus path. Build the real per-file paths instead.
    image = cv2.imread(os.path.join(args.folder, filename))
    if image is None:
        # cv2.imread returns None for unreadable/corrupt files; skip them
        # instead of crashing in imshow.
        print("could not read:", filename)
        continue
    cv2.imshow('Image', image)
    # Wait up to 5 seconds for a key press.
    key = cv2.waitKey(5000)
    if key == ord('a'):
        print("happy")
        cv2.imwrite(os.path.join("happy", filename), image)
    elif key == ord('l'):
        print("not happy")
        cv2.imwrite(os.path.join("nothappy", filename), image)

cv2.destroyAllWindows()
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A StatusReceiver module to mail someone when a step warns/fails.
Since the behavior is very similar to the MailNotifier, we simply inherit from
it and also reuse some of its methods to send emails.
"""
import datetime
import os
import re
import time
import urllib
try:
# Create a block to work around evil sys.modules manipulation in
# email/__init__.py that triggers pylint false positives.
# pylint has issues importing it.
# pylint: disable=E0611,F0401
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import formatdate
except ImportError:
raise
from buildbot.status.builder import SUCCESS, FAILURE
from buildbot.status.mail import MailNotifier
from twisted.internet import defer
from twisted.python import log
from master.build_sheriffs import BuildSheriffs
from master import build_utils
class ChromiumNotifier(MailNotifier):
  """This is a status notifier which closes the tree upon failures.

  See builder.interfaces.IStatusReceiver to have more information about the
  parameters type."""
  # Overloaded functions need to be member even if they don't access self.
  # pylint: disable=R0201,W0221

  # Separator used when a builder packs several categories into one string.
  _CATEGORY_SPLITTER = '|'
  # Matches 'name [decoration]'; group(1) is the undecorated step name.
  _NAME_UNDECORATOR = re.compile(r'(.*\S)\s*(\[([^\[\]]*)\])$')
  def __init__(self, reply_to=None, categories_steps=None,
               exclusions=None, forgiving_steps=None, status_header=None,
               use_getname=False, sheriffs=None,
               public_html='public_html', minimum_delay_between_alert=600,
               enable_mail=True, **kwargs):
    """Constructor with following specific arguments (on top of base class').

    @type categories_steps: Dictionary of category string mapped to a list of
                            step strings.
    @param categories_steps: For each category name we can specify the steps we
                             want to check for success to keep the tree opened.
                             An empty list of steps means that we simply check
                             for results == FAILURE to close the tree. Defaults
                             to None for the dictionary, which means all
                             categories, and the empty string category can be
                             used to say all builders.
    @type exclusions: Dictionary of strings to arrays of strings.
    @param exclusions: The key is a builder name for which we want to ignore a
                       series of step names set as the value in the form of an
                       array of strings. Defaults to None.
    @type forgiving_steps: List of strings.
    @param forgiving_steps: The list of steps for which a failure email should
                            NOT be sent to the blame list.
    @type status_header: String.
    @param status_header: Formatted header used in mail message.
    @type minimum_delay_between_alert: Integer.
    @param minimum_delay_between_alert: Don't send failure e-mails more often
                                        than the given value (in seconds).
    @type sheriffs: List of strings.
    @param sheriffs: The list of sheriff type names to be used for the set of
                     sheriffs. The final destination changes over time.
    @type public_html: String.
    @param public_html: Directory from which any additional configuration is
                        read. E.g. sheriff classes.
    @type use_getname: Boolean.
    @param use_getname: If true, step name is taken from getName(), otherwise
                        the step name is taken from getText().
    @type enable_mail: Boolean.
    @param enable_mail: If true, mail is sent, otherwise mail is formatted
                        and logged, but not sent.
    """
    # Change the default.
    kwargs.setdefault('sendToInterestedUsers', False)
    kwargs.setdefault('subject',
        'buildbot %(result)s in %(projectName)s on %(builder)s')
    MailNotifier.__init__(self, **kwargs)
    self.reply_to = reply_to
    self.categories_steps = categories_steps
    self.exclusions = exclusions or {}
    self.forgiving_steps = forgiving_steps or []
    self.status_header = status_header
    # A header template is mandatory: buildMessage() interpolates it.
    assert self.status_header
    self.minimum_delay_between_alert = minimum_delay_between_alert
    self.sheriffs = sheriffs or []
    self.public_html = public_html
    self.use_getname = use_getname
    self.enable_mail = enable_mail
    # Timestamp of the last alert, used to rate-limit emails.
    self._last_time_mail_sent = None
def isInterestingBuilder(self, builder_status):
"""Confirm if we are interested in this builder."""
builder_name = builder_status.getName()
if builder_name in self.exclusions and not self.exclusions[builder_name]:
return False
if not self.categories_steps or '' in self.categories_steps:
# We don't filter per step.
return True
if not builder_status.category:
return False
# We hack categories here. This should use a different builder attribute.
for category in builder_status.category.split(self._CATEGORY_SPLITTER):
if category in self.categories_steps:
return True
return False
def isInterestingStep(self, build_status, step_status, results):
"""Watch all steps that don't end in success."""
return results[0] != SUCCESS
def builderAdded(self, builder_name, builder_status):
"""Only subscribe to builders we are interested in.
@type name: string
@type builder: L{buildbot.status.builder.BuilderStatus} which implements
L{buildbot.interfaces.IBuilderStatus}
"""
# Verify that MailNotifier would subscribe to this builder.
if not MailNotifier.builderAdded(self, builder_name, builder_status):
return None
# Next check that ChromiumNotifier would subscribe.
if self.isInterestingBuilder(builder_status):
return self # subscribe to this builder
def buildStarted(self, builder_name, build_status):
"""A build has started allowing us to register for stepFinished.
@type builder_name: string
@type build_status: L{buildbot.status.builder.BuildStatus} which implements
L{buildbot.interfaces.IBuildStatus}
"""
if self.isInterestingBuilder(build_status.getBuilder()):
return self
def buildFinished(self, builder_name, build_status, results):
"""Must be overloaded to avoid the base class sending email."""
pass
def getName(self, step_status):
if not self.use_getname:
# TODO(maruel): This code needs to die.
texts = step_status.getText()
if texts:
return texts[0]
return step_status.getName()
def getGenericName(self, step_name):
reduced_name = self._NAME_UNDECORATOR.match(step_name.strip())
if reduced_name:
return reduced_name.group(1)
return step_name.strip()
  def stepFinished(self, build_status, step_status, results):
    """A build step has just finished.

    Decides, based on categories/exclusions configuration, whether this step
    failure warrants an alert email via buildMessage().

    @type build_status: L{buildbot.status.builder.BuildStatus}
    @type step_status: L{buildbot.status.builder.BuildStepStatus}
    @type results: tuple described at
                   L{buildbot.interfaces.IBuildStepStatus.getResults}
    """
    if not self.isInterestingStep(build_status, step_status, results):
      return
    builder_status = build_status.getBuilder()
    builder_name = builder_status.getName()
    step_name = self.getName(step_status)
    # The undecorated step name is what exclusion/step lists are keyed on.
    step_class = self.getGenericName(step_name)
    if builder_name in self.exclusions:
      if step_class in self.exclusions[builder_name]:
        return
    if not self.categories_steps:
      # No filtering on steps.
      return self.buildMessage(builder_name, build_status, results, step_name)
    # Now get all the steps we must check for this builder.
    steps_to_check = []
    wildcard = False
    if builder_status.category:
      for category in builder_status.category.split(self._CATEGORY_SPLITTER):
        # A '*' entry means every step of this category is interesting.
        if self.categories_steps.get(category) == '*':
          wildcard = True
          break
        if category in self.categories_steps:
          steps_to_check += self.categories_steps[category]
    # The empty-string category applies to all builders.
    if '' in self.categories_steps:
      steps_to_check += self.categories_steps['']
    if wildcard or step_class in steps_to_check:
      return self.buildMessage(builder_name, build_status, results, step_name)
  def getFinishedMessage(self, dummy, builder_name, build_status, step_name):
    """Called after being done sending the email; returns a fired Deferred."""
    return defer.succeed(0)
def sendMessage(self, message, recipients):
if os.path.exists('.suppress_mailer') or not self.enable_mail:
format_string = 'Not sending mail to %r (suppressed!):\n%s'
if not self.enable_mail:
format_string = 'Not sending mail to %r:\n%s'
log.msg(format_string % (recipients, str(message)))
return None
return MailNotifier.sendMessage(self, message, recipients)
def shouldBlameCommitters(self, step_name):
if self.getGenericName(step_name) not in self.forgiving_steps:
return True
return False
  def _logMail(self, res, recipients, message):
    """Log the composed mail instead of sending it (enable_mail is off)."""
    log.msg('Not sending mail to %r:\n%s' % (recipients, str(message)))
  def buildMessage(self, builder_name, build_status, results, step_name):
    """Send an email about the tree closing.

    Don't attach the patch as MailNotifier.buildMessage does.

    Builds both an HTML and a plain-text body, resolves the recipient list
    (extra recipients, sheriffs, optionally the blame list) and fires the
    send (or log, when mail is disabled) through a DeferredList.

    @type builder_name: string
    @type build_status: L{buildbot.status.builder.BuildStatus}
    @type step_name: name of this step
    """
    # TODO(maruel): Update function signature to match
    # mail.MailNotifier.buildMessage().
    if (self._last_time_mail_sent and self._last_time_mail_sent >
        time.time() - self.minimum_delay_between_alert):
      # Rate limit tree alerts.
      log.msg('Suppressing repeat email')
      return
    log.msg('About to email')
    self._last_time_mail_sent = time.time()
    # TODO(maruel): Use self.createEmail().
    blame_interested_users = self.shouldBlameCommitters(step_name)
    project_name = self.master_status.getTitle()
    revisions_list = build_utils.getAllRevisions(build_status)
    build_url = self.master_status.getURLForThing(build_status)
    waterfall_url = self.master_status.getBuildbotURL()
    # Interpolate the configured header template with build details.
    status_text = self.status_header % {
        'buildbotURL': waterfall_url,
        'builder': builder_name,
        'builderName': builder_name,
        'buildProperties': build_status.getProperties(),
        'buildURL': build_url,
        'project': project_name,
        'reason': build_status.getReason(),
        'slavename': build_status.getSlavename(),
        'steps': step_name,
    }
    # Use the first line as a title.
    status_title = status_text.split('\n', 1)[0]
    blame_list = ','.join(build_status.getResponsibleUsers())
    revisions_string = ''
    latest_revision = 0
    if revisions_list:
      revisions_string = ', '.join([str(rev) for rev in revisions_list])
      latest_revision = max([rev for rev in revisions_list])
    if results[0] == FAILURE:
      result = 'failure'
    else:
      result = 'warning'
    # Generate a HTML table looking like the waterfall.
    # WARNING: Gmail ignores embedded CSS style. I don't know how to fix that so
    # meanwhile, I just won't embedded the CSS style.
    html_content = (
        """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%s</title>
</head>
<body>
<a href="%s">%s</a><p>
%s<p>
<a href="%s">%s</a><p>
Revision: %s<br>
""" % (status_title, waterfall_url, waterfall_url,
       status_text.replace('\n', "<br>\n"), build_url,
       build_url, revisions_string))
    # Only include the blame list if relevant.
    if blame_interested_users:
      html_content += " Blame list: %s<p>\n" % blame_list
    html_content += build_utils.EmailableBuildTable(build_status, waterfall_url)
    html_content += "<p>"
    # Add the change list descriptions. getChanges() returns a tuple of
    # buildbot.changes.changes.Change
    for change in build_status.getChanges():
      html_content += change.asHTML()
    html_content += "</body>\n</html>"
    # Simpler text content for non-html aware clients.
    text_content = (
        """%s
%s
%swaterfall?builder=%s
 --=> %s <=--
Revision: %s
Blame list: %s
Buildbot waterfall: http://build.chromium.org/
""" % (status_title,
       build_url,
       urllib.quote(waterfall_url, '/:'),
       urllib.quote(builder_name),
       status_text,
       revisions_string,
       blame_list))
    m = MIMEMultipart('alternative')
    # The HTML message, is best and preferred.
    m.attach(MIMEText(text_content, 'plain', 'iso-8859-1'))
    m.attach(MIMEText(html_content, 'html', 'iso-8859-1'))
    m['Date'] = formatdate(localtime=True)
    m['Subject'] = self.subject % {
        'result': result,
        'projectName': project_name,
        'builder': builder_name,
        'reason': build_status.getReason(),
        'revision': str(latest_revision),
        'buildnumber': str(build_status.getNumber()),
        'date': str(datetime.date.today()),
        'steps': step_name,
    }
    m['From'] = self.fromaddr
    if self.reply_to:
      m['Reply-To'] = self.reply_to
    # Static recipients: configured extras plus the current sheriffs.
    recipients = list(self.extraRecipients[:])
    if self.sheriffs:
      recipients.extend(BuildSheriffs.GetSheriffs(classes=self.sheriffs,
                                                  data_dir=self.public_html))
    # Asynchronously resolve blame-list addresses via the lookup object.
    dl = []
    if self.sendToInterestedUsers and self.lookup and blame_interested_users:
      for u in build_status.getInterestedUsers():
        d = defer.maybeDeferred(self.lookup.getAddress, u)
        d.addCallback(recipients.append)
        dl.append(d)
    defered_object = defer.DeferredList(dl)
    if not self.enable_mail:
      defered_object.addCallback(self._logMail, recipients, m)
    else:
      defered_object.addCallback(self._gotRecipients, recipients, m)
    defered_object.addCallback(self.getFinishedMessage, builder_name,
                               build_status, step_name)
    return defered_object
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# NOTE(review): `os` and `sys` appear unused in this config; confirm nothing
# (e.g. a sys.path tweak) was removed before deleting them.

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'zuul_sphinx',
]

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'openstack-zuul-jobs'
copyright = u'2017, Zuul contributors'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow.compat.v1 as tf
from smith import loss_fns
class LossFnsTest(tf.test.TestCase):

  def test_get_prediction_loss_cosine(self):
    """Checks shapes and dtypes returned by get_prediction_loss_cosine."""
    # Two example pairs of 6-dimensional encodings.
    encodings_a = tf.constant(
        [[0.5, 0.7, 0.8, 0.9, 0.1, 0.1], [0.1, 0.3, 0.3, 0.3, 0.1, 0.1]],
        dtype=tf.float32)
    encodings_b = tf.constant(
        [[0.1, 0.2, 0.2, 0.2, 0.2, 0.1], [0.1, 0.4, 0.4, 0.4, 0.1, 0.1]],
        dtype=tf.float32)
    pair_labels = tf.constant([0, 1.0], dtype=tf.float32)
    loss, per_example_loss, similarities = \
        loss_fns.get_prediction_loss_cosine(
            input_tensor_1=encodings_a,
            input_tensor_2=encodings_b,
            labels=pair_labels,
            similarity_score_amplifier=6.0,
            neg_to_pos_example_ratio=1.0)
    with tf.Session() as sess:
      sess.run([tf.global_variables_initializer()])
      loss_np = sess.run(loss)
      per_example_loss_np = sess.run(per_example_loss)
      similarities_np = sess.run(similarities)
    # The overall loss is a float32 scalar.
    self.assertEqual(loss_np.shape, ())
    self.assertDTypeEqual(loss_np, np.float32)
    # One loss value and one similarity per example pair.
    self.assertEqual(per_example_loss_np.shape, (2,))
    self.assertDTypeEqual(per_example_loss_np, np.float32)
    self.assertEqual(similarities_np.shape, (2,))
    self.assertDTypeEqual(similarities_np, np.float32)
# Run all test cases through the TensorFlow test runner.
if __name__ == '__main__':
  tf.test.main()
|
"""Contains SQL Dialects."""
from typing import NamedTuple
from sqlfluff.core.dialects.dialect_ansi import ansi_dialect
from sqlfluff.core.dialects.dialect_bigquery import bigquery_dialect
from sqlfluff.core.dialects.dialect_mysql import mysql_dialect
from sqlfluff.core.dialects.dialect_teradata import teradata_dialect
from sqlfluff.core.dialects.dialect_postgres import postgres_dialect
from sqlfluff.core.dialects.dialect_snowflake import snowflake_dialect
from sqlfluff.core.dialects.dialect_exasol import exasol_dialect
from sqlfluff.core.dialects.dialect_exasol_fs import exasol_fs_dialect
# Internal registry mapping a dialect label (the string a user selects,
# e.g. on the command line or in config) to its dialect object.
_dialect_lookup = {
    "ansi": ansi_dialect,
    "bigquery": bigquery_dialect,
    "mysql": mysql_dialect,
    "teradata": teradata_dialect,
    "postgres": postgres_dialect,
    "snowflake": snowflake_dialect,
    "exasol": exasol_dialect,
    "exasol_fs": exasol_fs_dialect,
}
class DialectTuple(NamedTuple):
    """Dialect Tuple object for describing dialects.
    Attributes:
        label: The lookup key of the dialect (e.g. "ansi").
        name: The human readable name of the dialect.
        inherits_from: Label of the parent dialect, or "nothing".
    """
    label: str
    name: str
    inherits_from: str
def dialect_readout():
    """Generate a readout of available dialects."""
    # Yield one descriptive tuple per registered dialect.
    for label, dialect in _dialect_lookup.items():
        yield DialectTuple(
            label=label,
            name=dialect.name,
            inherits_from=dialect.inherits_from or "nothing",
        )
def dialect_selector(s):
    """Return a dialect given its name.

    Args:
        s: The label of the dialect, or a falsy value to default to "ansi".

    Returns:
        The selected dialect, with any callable references expanded.

    Raises:
        KeyError: If no dialect with the given label is registered. The
            message lists the available labels (previously a bare KeyError
            with no context leaked out of the dict lookup).
    """
    s = s or "ansi"
    try:
        dialect = _dialect_lookup[s]
    except KeyError:
        # Still a KeyError (callers may catch it), but with a helpful message.
        raise KeyError(
            "Unknown dialect {!r}. Available dialects: {}".format(
                s, ", ".join(sorted(_dialect_lookup))
            )
        )
    # Expand any callable references at this point.
    dialect.expand()
    return dialect
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetP2sVpnGatewayResult',
    'AwaitableGetP2sVpnGatewayResult',
    'get_p2s_vpn_gateway',
]
@pulumi.output_type
class GetP2sVpnGatewayResult:
    """
    P2SVpnGateway Resource.
    """
    # NOTE: generated code (Pulumi SDK generator). The repetitive isinstance
    # checks below validate each field of the raw invoke result before it is
    # stored on the output type via pulumi.set.
    def __init__(__self__, custom_dns_servers=None, etag=None, id=None, is_routing_preference_internet=None, location=None, name=None, p2_s_connection_configurations=None, provisioning_state=None, tags=None, type=None, virtual_hub=None, vpn_client_connection_health=None, vpn_gateway_scale_unit=None, vpn_server_configuration=None):
        if custom_dns_servers and not isinstance(custom_dns_servers, list):
            raise TypeError("Expected argument 'custom_dns_servers' to be a list")
        pulumi.set(__self__, "custom_dns_servers", custom_dns_servers)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if is_routing_preference_internet and not isinstance(is_routing_preference_internet, bool):
            raise TypeError("Expected argument 'is_routing_preference_internet' to be a bool")
        pulumi.set(__self__, "is_routing_preference_internet", is_routing_preference_internet)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if p2_s_connection_configurations and not isinstance(p2_s_connection_configurations, list):
            raise TypeError("Expected argument 'p2_s_connection_configurations' to be a list")
        pulumi.set(__self__, "p2_s_connection_configurations", p2_s_connection_configurations)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if virtual_hub and not isinstance(virtual_hub, dict):
            raise TypeError("Expected argument 'virtual_hub' to be a dict")
        pulumi.set(__self__, "virtual_hub", virtual_hub)
        if vpn_client_connection_health and not isinstance(vpn_client_connection_health, dict):
            raise TypeError("Expected argument 'vpn_client_connection_health' to be a dict")
        pulumi.set(__self__, "vpn_client_connection_health", vpn_client_connection_health)
        if vpn_gateway_scale_unit and not isinstance(vpn_gateway_scale_unit, int):
            raise TypeError("Expected argument 'vpn_gateway_scale_unit' to be a int")
        pulumi.set(__self__, "vpn_gateway_scale_unit", vpn_gateway_scale_unit)
        if vpn_server_configuration and not isinstance(vpn_server_configuration, dict):
            raise TypeError("Expected argument 'vpn_server_configuration' to be a dict")
        pulumi.set(__self__, "vpn_server_configuration", vpn_server_configuration)
    # Each property below exposes one stored field; the pulumi.getter name is
    # the camelCase wire name of the corresponding Azure API field.
    @property
    @pulumi.getter(name="customDnsServers")
    def custom_dns_servers(self) -> Optional[Sequence[str]]:
        """
        List of all customer specified DNS servers IP addresses.
        """
        return pulumi.get(self, "custom_dns_servers")
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="isRoutingPreferenceInternet")
    def is_routing_preference_internet(self) -> Optional[bool]:
        """
        Enable Routing Preference property for the Public IP Interface of the P2SVpnGateway.
        """
        return pulumi.get(self, "is_routing_preference_internet")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="p2SConnectionConfigurations")
    def p2_s_connection_configurations(self) -> Optional[Sequence['outputs.P2SConnectionConfigurationResponse']]:
        """
        List of all p2s connection configurations of the gateway.
        """
        return pulumi.get(self, "p2_s_connection_configurations")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the P2S VPN gateway resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> Optional['outputs.SubResourceResponse']:
        """
        The VirtualHub to which the gateway belongs.
        """
        return pulumi.get(self, "virtual_hub")
    @property
    @pulumi.getter(name="vpnClientConnectionHealth")
    def vpn_client_connection_health(self) -> 'outputs.VpnClientConnectionHealthResponse':
        """
        All P2S VPN clients' connection health status.
        """
        return pulumi.get(self, "vpn_client_connection_health")
    @property
    @pulumi.getter(name="vpnGatewayScaleUnit")
    def vpn_gateway_scale_unit(self) -> Optional[int]:
        """
        The scale unit for this p2s vpn gateway.
        """
        return pulumi.get(self, "vpn_gateway_scale_unit")
    @property
    @pulumi.getter(name="vpnServerConfiguration")
    def vpn_server_configuration(self) -> Optional['outputs.SubResourceResponse']:
        """
        The VpnServerConfiguration to which the p2sVpnGateway is attached to.
        """
        return pulumi.get(self, "vpn_server_configuration")
class AwaitableGetP2sVpnGatewayResult(GetP2sVpnGatewayResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns this method into a generator, which
        # is what makes the object awaitable; awaiting it resolves
        # immediately to a plain GetP2sVpnGatewayResult copy.
        if False:
            yield self
        return GetP2sVpnGatewayResult(
            custom_dns_servers=self.custom_dns_servers,
            etag=self.etag,
            id=self.id,
            is_routing_preference_internet=self.is_routing_preference_internet,
            location=self.location,
            name=self.name,
            p2_s_connection_configurations=self.p2_s_connection_configurations,
            provisioning_state=self.provisioning_state,
            tags=self.tags,
            type=self.type,
            virtual_hub=self.virtual_hub,
            vpn_client_connection_health=self.vpn_client_connection_health,
            vpn_gateway_scale_unit=self.vpn_gateway_scale_unit,
            vpn_server_configuration=self.vpn_server_configuration)
def get_p2s_vpn_gateway(gateway_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetP2sVpnGatewayResult:
    """
    P2SVpnGateway Resource.
    API Version: 2020-08-01.
    :param str gateway_name: The name of the gateway.
    :param str resource_group_name: The resource group name of the P2SVpnGateway.
    """
    # Arguments for the provider invoke, keyed by their wire names.
    __args__ = {
        'gatewayName': gateway_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw_result = pulumi.runtime.invoke('azure-native:network:getP2sVpnGateway', __args__, opts=opts, typ=GetP2sVpnGatewayResult).value
    # Re-wrap the raw result into the awaitable variant.
    return AwaitableGetP2sVpnGatewayResult(
        custom_dns_servers=raw_result.custom_dns_servers,
        etag=raw_result.etag,
        id=raw_result.id,
        is_routing_preference_internet=raw_result.is_routing_preference_internet,
        location=raw_result.location,
        name=raw_result.name,
        p2_s_connection_configurations=raw_result.p2_s_connection_configurations,
        provisioning_state=raw_result.provisioning_state,
        tags=raw_result.tags,
        type=raw_result.type,
        virtual_hub=raw_result.virtual_hub,
        vpn_client_connection_health=raw_result.vpn_client_connection_health,
        vpn_gateway_scale_unit=raw_result.vpn_gateway_scale_unit,
        vpn_server_configuration=raw_result.vpn_server_configuration)
|
from frappe import _
def get_data():
    """Return the desk configuration for the Background Verification module."""
    # Doctypes and pages listed on the module card.
    module_items = [
        {
            'type': 'doctype',
            'name': 'Checks Group',
            'label': _('Checks Group'),
            'description': _('VHRS Associate Database'),
            'hide_count': False
        },
        {
            'type': 'doctype',
            'name': 'Add Profile',
            'icon': 'fa fa-star',
            'label': _('Add Profile'),
            'description': _('VHRS Candidate Database')
        },
        {
            'type': 'doctype',
            'name': 'BG Profile',
            'icon': 'fa fa-star',
            'label': _('BG Profile'),
            'description': _('Recruitment Documentation')
        },
        {
            'type': 'doctype',
            'name': 'Bulk Excel Upload',
            'label': _('Bulk Excel Upload'),
            'description': _('Interviews for Projects')
        },
        {
            'type': 'doctype',
            'name': 'Demographic Data With Attachment',
            'label': _('Demographic Data With Attachment'),
            'description': _('Interviews for Projects')
        },
        {
            'type': 'doctype',
            'name': 'Archive',
            'label': _('Archive'),
            'description': _('Interviews for Projects')
        },
        {
            'type': 'page',
            'name': 'dashboard',
            'icon': 'fa fa-dashboard',
            'label': _('Dashboard'),
            'description': _('VHRS Candidate Database')
        }
    ]
    return [
        {
            'module_name': 'Background Verification',
            'color': 'grey',
            'icon': 'fa fa-star',
            'type': 'module',
            'label': _('Background Verification'),
            'items': module_items
        }
    ]
|
import numpy as np
from ..abstract import Processor
from ..backend.boxes import to_one_hot
class ControlMap(Processor):
    """Controls which inputs are passed ''processor'' and the order of its
    outputs.
    # Arguments
        processor: Function e.g. a ''paz.processor''
        intro_indices: List of Ints. Positions of the call args routed into
            ``processor``.
        outro_indices: List of Ints. Positions at which the processor's
            outputs are re-inserted among the untouched args.
        keep: ''None'' or dictionary. If ``None`` control maps operates
            without explicitly retaining an input. If dict it must contain
            as keys the input args to be kept and as values where they should
            be located at the end.
    """
    # NOTE(review): the mutable list defaults below are shared across
    # instances; benign as written because they are only read, never mutated.
    def __init__(self, processor, intro_indices=[0], outro_indices=[0],
                 keep=None):
        self.processor = processor
        if not isinstance(intro_indices, list):
            raise ValueError('``intro_indices`` must be a list')
        if not isinstance(outro_indices, list):
            raise ValueError('``outro_indices`` must be a list')
        self.intro_indices = intro_indices
        self.outro_indices = outro_indices
        # Name the wrapper after both this class and the wrapped processor.
        name = '-'.join([self.__class__.__name__, self.processor.name])
        self.keep = keep
        super(ControlMap, self).__init__(name)
    def _select(self, inputs, indices):
        # Pick the args at ``indices`` (in that order).
        return [inputs[index] for index in indices]
    def _remove(self, inputs, indices):
        # Keep only the args whose position is NOT in ``indices``.
        return [inputs[i] for i in range(len(inputs)) if i not in indices]
    def _split(self, inputs, indices):
        # Partition ``inputs`` into (selected, remaining).
        return self._select(inputs, indices), self._remove(inputs, indices)
    def _insert(self, args, extra_args, indices):
        # Mutates ``args`` in place; the comprehension is used purely for
        # its ``insert`` side effects. Insertion order follows ``indices``.
        [args.insert(index, arg) for index, arg in zip(indices, extra_args)]
        return args
    def call(self, *args):
        # Route selected args through the processor, then weave its outputs
        # back into the remaining args at ``outro_indices``.
        selected_args, remaining_args = self._split(args, self.intro_indices)
        processed_args = self.processor(*selected_args)
        if not isinstance(processed_args, tuple):
            processed_args = [processed_args]
        return_args = self._insert(
            remaining_args, processed_args, self.outro_indices)
        if self.keep is not None:
            # Re-insert retained original args at their target positions.
            keep_intro = list(self.keep.keys())
            keep_outro = list(self.keep.values())
            keep_args = self._select(args, keep_intro)
            return_args = self._insert(return_args, keep_args, keep_outro)
        return tuple(return_args)
class ExpandDomain(ControlMap):
    """ControlMap specialization that widens the number of inputs a
    processor accepts; every additional input is passed through unchanged.
    e.g. given f(x) = y and g = ExpandDomain(f), then
    g(x, x1, x2, ..., xn) = y, x1, x2, ..., xn.
    # Arguments
        processor: Function e.g. any processor in ''paz.processors''.
    """
    def __init__(self, processor):
        super().__init__(processor)
class CopyDomain(Processor):
    """Duplicates the args at ''intro_indices'' and inserts the copies at
    ''outro_indices''.
    # Arguments
        intro_indices: List of Ints. Positions of the args to copy.
        outro_indices: List of Ints. Positions at which copies are inserted.
    """
    def __init__(self, intro_indices, outro_indices):
        super(CopyDomain, self).__init__()
        if not isinstance(intro_indices, list):
            raise ValueError('``intro_indices`` must be a list')
        if not isinstance(outro_indices, list):
            raise ValueError('``outro_indices`` must be a list')
        self.intro_indices = intro_indices
        self.outro_indices = outro_indices
    def _select(self, inputs, indices):
        return [inputs[i] for i in indices]
    def _insert(self, args, axes, values):
        # Insert each copied value at its requested axis (in-place).
        for axis, value in zip(axes, values):
            args.insert(axis, value)
        return args
    def call(self, *args):
        copies = self._select(args, self.intro_indices)
        extended = self._insert(list(args), self.outro_indices, copies)
        return tuple(extended)
class UnpackDictionary(Processor):
    """Unpacks a dictionary into a tuple.
    # Arguments
        order: List of strings containing the keys of the dictionary;
            the resulting tuple follows this order.
    """
    def __init__(self, order):
        if not isinstance(order, list):
            raise ValueError('``order`` must be a list')
        self.order = order
        super(UnpackDictionary, self).__init__()
    def call(self, kwargs):
        return tuple(kwargs[name] for name in self.order)
class WrapOutput(Processor):
    """Packs positional arguments into a dictionary.
    # Arguments
        keys: List of strings used as dictionary keys; its order must match
            the order of the call arguments (''args'').
    """
    def __init__(self, keys):
        if not isinstance(keys, list):
            raise ValueError('``order`` must be a list')
        self.keys = keys
        super(WrapOutput, self).__init__()
    def call(self, *args):
        return {key: arg for key, arg in zip(self.keys, args)}
class ExtendInputs(Processor):
    """Extends number of inputs a function can take applying the identity
    function to all new/extended inputs.
    e.g. For a given function f(x) = y. If g = ExtendInputs(f), we can
    now have g(x, x1, x2, ..., xn) = y, x1, x2, ..., xn.
    # Arguments
        processor: Function e.g. any procesor in ''paz.processors''.
    """
    def __init__(self, processor):
        self.processor = processor
        # Name the wrapper after both this class and the wrapped processor.
        name = '-'.join([self.__class__.__name__, self.processor.name])
        super(ExtendInputs, self).__init__(name)
    def call(self, X, *args):
        # NOTE(review): this returns a 2-tuple (processor(X), args) where
        # ``args`` is itself a tuple, i.e. the nested form (y, (x1, ..., xn))
        # rather than the flattened (y, x1, ..., xn) the docstring suggests
        # -- confirm which shape callers expect.
        return self.processor(X), args
class Concatenate(Processor):
    """Concatenates a list of arrays in given ''axis''.
    # Arguments
        axis: Int. Axis along which the input arrays are concatenated.
    """
    def __init__(self, axis):
        # BUG FIX: the original wrote ``super(Concatenate, self)`` without
        # calling ``.__init__()``, so ``Processor.__init__`` never ran
        # (unlike every sibling processor in this module).
        super(Concatenate, self).__init__()
        self.axis = axis
    def call(self, inputs):
        return np.concatenate(inputs, self.axis)
class SequenceWrapper(Processor):
    """Wraps arguments to directly use
    ''paz.abstract.ProcessingSequence'' or
    ''paz.abstract.GeneratingSequence''.
    # Arguments
        inputs_info: Dictionary containing an integer per key representing
            the argument to grab, and as value a dictionary containing the
            tensor name as key and the tensor shape of a single sample as value
            e.g. {0: {'input_image': [300, 300, 3]}, 1: {'depth': [300, 300]}}.
            The values given here are for the inputs of the model.
        labels_info: Dictionary containing an integer per key representing
            the argument to grab, and as value a dictionary containing the
            tensor name as key and the tensor shape of a single sample as value
            e.g. {2: {'classes': [10]}}.
            The values given here are for the labels of the model.
    """
    def __init__(self, inputs_info, labels_info):
        if not isinstance(inputs_info, dict):
            raise ValueError('``inputs_info`` must be a dictionary')
        self.inputs_info = inputs_info
        if not isinstance(labels_info, dict):
            # BUG FIX: this message previously referred to ``inputs_info``
            # (copy-paste error), misreporting which argument was invalid.
            raise ValueError('``labels_info`` must be a dictionary')
        self.labels_info = labels_info
        self.inputs_name_to_shape = self._extract_name_to_shape(inputs_info)
        self.labels_name_to_shape = self._extract_name_to_shape(labels_info)
        self.ordered_input_names = self._extract_ordered_names(inputs_info)
        self.ordered_label_names = self._extract_ordered_names(labels_info)
        super(SequenceWrapper, self).__init__()
    def _extract_name_to_shape(self, info):
        # Flatten {arg_index: {name: shape}} into a single {name: shape} map.
        name_to_shape = {}
        for values in info.values():
            name_to_shape.update(values)
        return name_to_shape
    def _extract_ordered_names(self, info):
        # Tensor names sorted by the positional argument they come from.
        return [list(info[argument].keys())[0] for argument in sorted(info)]
    def _wrap(self, args, info):
        # Map each tensor name to the positional arg it was declared for.
        return {list(name_to_shape.keys())[0]: args[arg]
                for arg, name_to_shape in info.items()}
    def call(self, *args):
        return {'inputs': self._wrap(args, self.inputs_info),
                'labels': self._wrap(args, self.labels_info)}
class Predict(Processor):
    """Perform input preprocessing, model prediction and output postprocessing.
    # Arguments
        model: Class with a ''predict'' method e.g. a Keras model.
        preprocess: Optional function applied to the input before prediction.
        postprocess: Optional function applied to the model's prediction.
    """
    def __init__(self, model, preprocess=None, postprocess=None):
        super(Predict, self).__init__()
        self.model = model
        self.preprocess = preprocess
        self.postprocess = postprocess
    def call(self, x):
        # Each stage is optional; skip it when not configured.
        inputs = x if self.preprocess is None else self.preprocess(x)
        outputs = self.model.predict(inputs)
        if self.postprocess is None:
            return outputs
        return self.postprocess(outputs)
class ToClassName(Processor):
    """Maps a score vector to the label with the highest score.
    # Arguments
        labels: List of class names indexed by class id.
    """
    def __init__(self, labels):
        super(ToClassName, self).__init__()
        self.labels = labels
    def call(self, x):
        best_class_arg = np.argmax(x)
        return self.labels[best_class_arg]
class ExpandDims(Processor):
    """Inserts a new axis of length one into the given array.
    # Arguments
        axis: Int. Position at which the new axis is inserted.
    """
    def __init__(self, axis):
        super(ExpandDims, self).__init__()
        self.axis = axis
    def call(self, x):
        expanded = np.expand_dims(x, self.axis)
        return expanded
class SelectElement(Processor):
    """Selects a single element of the input value.
    # Arguments
        index: Int. Position of the element to return from ''inputs''.
    """
    def __init__(self, index):
        super(SelectElement, self).__init__()
        self.index = index
    def call(self, inputs):
        selected = inputs[self.index]
        return selected
class BoxClassToOneHotVector(Processor):
    """Transform box data with class index to a one-hot encoded vector.
    # Arguments
        num_classes: Integer. Total number of classes.
    """
    def __init__(self, num_classes):
        self.num_classes = num_classes
        super(BoxClassToOneHotVector, self).__init__()
    def call(self, boxes):
        # Column 4 holds the integer class index of each box.
        class_indices = boxes[:, 4].astype('int')
        one_hot = to_one_hot(class_indices, self.num_classes)
        one_hot = one_hot.reshape(-1, self.num_classes)
        # Keep the four coordinate columns, replace the index by the one-hot.
        return np.hstack([boxes[:, :4], one_hot.astype('float')])
class Squeeze(Processor):
    """Wrap around numpy `squeeze` due to common use before model predict.
    # Arguments
        axis: Int or list of Ints. Length-one axis (or axes) to remove.
    """
    def __init__(self, axis):
        super(Squeeze, self).__init__()
        self.axis = axis
    def call(self, x):
        return np.squeeze(x, axis=self.axis)
class Copy(Processor):
    """Returns a copy of the value passed in (via the value's own
    ``copy`` method)."""
    def __init__(self):
        super(Copy, self).__init__()
    def call(self, x):
        copied = x.copy()
        return copied
class Lambda(object):
    """Applies an arbitrary callable as a processor transformation.
    # Arguments
        function: Callable applied to the processor input.
    """
    def __init__(self, function):
        self.function = function
    def __call__(self, x):
        outcome = self.function(x)
        return outcome
class StochasticProcessor(Processor):
    def __init__(self, probability=0.5, name=None):
        """Adds stochasticity to the user implemented ``call`` function
        # Arguments:
            probability: Probability of calling ``call`` function
        # Example:
        ```python
        class RandomAdd(StochasticProcessor):
            def __init__(self, probability=0.5):
                super(RandomAdd, self).__init__(probability)
            def call(self, x):
                return x + 1
        random_add = RandomAdd(probability=0.5)
        # value can be either 1.0 or 2.0
        value = random_add(1.0)
        ```
        """
        super(StochasticProcessor, self).__init__(name=name)
        self.probability = probability
    def call(self, X):
        # Subclasses implement the stochastic transformation here.
        raise NotImplementedError
    def __call__(self, X):
        # Apply ``call`` with probability ``self.probability``; otherwise
        # return the input unchanged.
        if self.probability >= np.random.rand():
            return self.call(X)
        return X
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
# Seconds to wait for a bitcoind process to terminate before giving up.
BITCOIND_PROC_WAIT_TIMEOUT = 60
# Raised (e.g. by wait_for_rpc_connection) when bitcoind exits during
# initialization.
class FailedToStartError(Exception):
    """Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
    """How an expected error message is compared against actual stderr."""
    FULL_TEXT = 1
    FULL_REGEX = 2
    PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
    def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
        """Initialize test-node state; does NOT start the bitcoind process.
        i: node index (used for ports, logger name and uacomment).
        datadir: per-node data directory; stdout/stderr dirs live below it."""
        self.index = i
        self.datadir = datadir
        self.stdout_dir = os.path.join(self.datadir, "stdout")
        self.stderr_dir = os.path.join(self.datadir, "stderr")
        self.rpchost = rpchost
        self.rpc_timeout = timewait
        self.binary = bitcoind
        self.coverage_dir = coverage_dir
        if extra_conf is not None:
            append_config(datadir, extra_conf)
        # Most callers will just need to add extra args to the standard list below.
        # For those callers that need more flexibility, they can just set the args property directly.
        # Note that common args are set in the config file (see initialize_datadir)
        self.extra_args = extra_args
        self.args = [
            self.binary,
            "-datadir=" + self.datadir,
            "-logtimemicros",
            "-debug",
            "-debugexclude=libevent",
            "-debugexclude=leveldb",
            "-mocktime=" + str(mocktime),
            "-uacomment=testnode%d" % i
        ]
        self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
        self.use_cli = use_cli
        # Process / RPC state, populated by start() and wait_for_rpc_connection().
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True # Whether to kill the node when this object goes away
        self.p2ps = []
    def get_deterministic_priv_key(self):
        """Return a deterministic priv key in base58, that only depends on the node's index"""
        AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
        PRIV_KEYS = [
            # address , privkey
            AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
            AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
            AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
            AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
            AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
            AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
            AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
            AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
            AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
        ]
        # NOTE: supports at most len(PRIV_KEYS) nodes; indexing past that raises IndexError.
        return PRIV_KEYS[self.index]
    def get_mem_rss_kilobytes(self):
        """Get the memory usage (RSS) per `ps`.
        Returns None if `ps` is unavailable.
        """
        assert self.running
        try:
            # `ps h -o rss <pid>` prints the resident set size without a header.
            return int(subprocess.check_output(
                ["ps", "h", "-o", "rss", "{}".format(self.process.pid)],
                stderr=subprocess.DEVNULL).split()[-1])
        # Avoid failing on platforms where ps isn't installed.
        #
        # We could later use something like `psutils` to work across platforms.
        except (FileNotFoundError, subprocess.SubprocessError):
            self.log.exception("Unable to get memory usage")
            return None
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
    def __del__(self):
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print(self._node_msg("Cleaning up leftover process"))
            # Hard kill; a clean shutdown is not expected at this point.
            self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
    def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
        """Start the node.
        Additional keyword arguments are forwarded to subprocess.Popen."""
        if extra_args is None:
            extra_args = self.extra_args
        # Add a new stdout and stderr file each time bitcoind is started
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
        if stdout is None:
            stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
        self.stderr = stderr
        self.stdout = stdout
        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir)
        # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
        subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
        self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)
        self.running = True
        self.log.debug("bitcoind started, waiting for RPC to come up")
    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            if self.process.poll() is not None:
                # The process died before RPC ever came up.
                raise FailedToStartError(self._node_msg(
                    'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
            try:
                self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
                self.rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                self.rpc_connected = True
                self.url = self.rpc.url
                self.log.debug("RPC successfully started")
                return
            except IOError as e:
                if e.errno != errno.ECONNREFUSED:  # Port not yet open?
                    raise  # unknown IO error
            except JSONRPCException as e:  # Initialization phase
                # -28 RPC in warmup
                # -342 Service unavailable, RPC server started but is shutting down due to error
                if e.error['code'] != -28 and e.error['code'] != -342:
                    raise  # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to connect to bitcoind")
    def generate(self, nblocks, maxtries=1000000):
        # Compatibility shim: route `generate` to `generatetoaddress` using
        # this node's deterministic address.
        self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
        return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
    def stop_node(self, expected_stderr='', wait=0):
        """Stop the node.
        expected_stderr: exact stderr output expected from the node; any
            difference raises an AssertionError.
        wait: forwarded to the stop call (dispatched via __getattr__)."""
        if not self.running:
            return
        self.log.debug("Stopping node")
        try:
            self.stop(wait=wait)
        except http.client.CannotSendRequest:
            self.log.exception("Unable to stop node.")
        # Check that stderr is as expected
        self.stderr.seek(0)
        stderr = self.stderr.read().decode('utf-8').strip()
        if stderr != expected_stderr:
            raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
        self.stdout.close()
        self.stderr.close()
        # Drop references to any P2P connections.
        del self.p2ps[:]
    def is_node_stopped(self):
        """Checks whether the node has stopped.
        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            # Still running.
            return False
        # process has stopped. Assert that it didn't return an error code.
        assert return_code == 0, self._node_msg(
            "Node returned non-zero exit code (%d) when stopping" % return_code)
        # Reset process/RPC state so the node can be restarted cleanly.
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True
    def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
        """Poll until is_node_stopped() returns True; timeout handling is
        delegated to util.wait_until."""
        wait_until(self.is_node_stopped, timeout=timeout)
    @contextlib.contextmanager
    def assert_debug_log(self, expected_msgs):
        """Context manager asserting that each of expected_msgs appears (as a
        literal substring -- the pattern is re.escape'd) in the debug.log
        output produced while the managed block runs."""
        debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
        with open(debug_log, encoding='utf-8') as dl:
            # Remember the current end of the log so only new output is scanned.
            dl.seek(0, 2)
            prev_size = dl.tell()
        try:
            yield
        finally:
            with open(debug_log, encoding='utf-8') as dl:
                dl.seek(prev_size)
                log = dl.read()
            print_log = " - " + "\n - ".join(log.splitlines())
            for expected_msg in expected_msgs:
                if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
                    self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
@contextlib.contextmanager
def assert_memory_usage_stable(self, *, increase_allowed=0.03):
"""Context manager that allows the user to assert that a node's memory usage (RSS)
hasn't increased beyond some threshold percentage.
Args:
increase_allowed (float): the fractional increase in memory allowed until failure;
e.g. `0.12` for up to 12% increase allowed.
"""
before_memory_usage = self.get_mem_rss_kilobytes()
yield
after_memory_usage = self.get_mem_rss_kilobytes()
if not (before_memory_usage and after_memory_usage):
self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
return
perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1
if perc_increase_memory_usage > increase_allowed:
self._raise_assertion_error(
"Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
increase_allowed * 100, before_memory_usage, after_memory_usage,
perc_increase_memory_usage * 100))
    def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
        """Attempt to start the node and expect it to raise an error.

        extra_args: extra arguments to pass through to bitcoind
        expected_msg: regex that stderr should match when bitcoind fails
        match: how expected_msg is compared against stderr (full text,
            full regex, or partial regex).

        Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
        # Capture stdout/stderr to files so the error text can be inspected.
        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
             tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
            try:
                self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
                self.wait_for_rpc_connection()
                self.stop_node()
                self.wait_until_stopped()
            except FailedToStartError as e:
                # Expected path: the node failed to start.
                self.log.debug('bitcoind failed to start: %s', e)
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8').strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_TEXT:
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
            else:
                # The node started cleanly, which is a failure for this helper.
                if expected_msg is None:
                    assert_msg = "bitcoind should have exited with an error"
                else:
                    assert_msg = "bitcoind should have exited with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
    @property
    def p2p(self):
        """Return the first p2p connection

        Convenience property - most tests only use a single p2p connection to each
        node, so this saves having to write node.p2ps[0] many times.

        Fails with an assertion error if no p2p connection is open."""
        assert self.p2ps, self._node_msg("No p2p connection")
        return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
    """Callable proxy binding a single command name to a TestNodeCLI."""

    def __init__(self, cli, command):
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        # Forward straight to the CLI with the bound command name prepended.
        return self.cli.send_cli(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        """Return a zero-argument thunk suitable for TestNodeCLI.batch()."""
        return lambda: self(*args, **kwargs)
class TestNodeCLI():
    """Interface to bitcoin-cli for an individual node"""

    def __init__(self, binary, datadir):
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.bitcoincli')

    def __call__(self, *options, input=None):
        # Calling the object with CLI options returns a fresh instance that
        # carries those options (and optional stdin text) for the next call.
        configured = TestNodeCLI(self.binary, self.datadir)
        configured.options = [str(option) for option in options]
        configured.input = input
        return configured

    def __getattr__(self, command):
        # Unknown attributes become bound RPC commands, e.g. cli.getinfo(...).
        return TestNodeCLIAttr(self, command)

    def batch(self, requests):
        """Run a list of zero-argument request thunks, collecting each result
        (or JSONRPCException) into a list of dicts."""
        outcomes = []
        for request in requests:
            try:
                outcomes.append(dict(result=request()))
            except JSONRPCException as exc:
                outcomes.append(dict(error=exc))
        return outcomes

    def send_cli(self, command=None, *args, **kwargs):
        """Run bitcoin-cli command. Deserializes returned string as python object."""
        # Booleans must be rendered lowercase ("true"/"false") for the CLI.
        pos_args = []
        for arg in args:
            pos_args.append(str(arg).lower() if isinstance(arg, bool) else str(arg))
        named_args = [str(key) + "=" + str(value) for key, value in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"

        p_args = [self.binary, "-datadir=" + self.datadir] + self.options
        if named_args:
            p_args.append("-named")
        if command is not None:
            p_args.append(command)
        p_args.extend(pos_args)
        p_args.extend(named_args)

        self.log.debug("Running bitcoin-cli command: %s" % command)
        process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            # bitcoin-cli reports RPC errors on stderr in a fixed format;
            # translate those into JSONRPCException, anything else into
            # CalledProcessError.
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except JSONDecodeError:
            # Non-JSON output (e.g. plain text) is returned as a string.
            return cli_stdout.rstrip("\n")
|
import pytest
from my_package import process
@pytest.mark.parametrize(
    ('name', 'expected'),
    [
        ('Hemingway, Ernest', 'Ernest Hemingway'),
        ('virginia woolf', 'Virginia Woolf'),
        ('charles dickens ', 'Charles Dickens'),
    ],
)
def test_clean_name(name, expected):
    """clean_name normalizes raw author names (the cases cover 'Last, First'
    reordering, title-casing and trailing-whitespace removal)."""
    result = process.clean_name(name)
    assert result == expected
|
import os
import typing
import pandas as pd
import numpy as np
from d3m import container, utils
from d3m.base import utils as base_utils
from d3m.metadata import base as metadata_base, hyperparams
from d3m.primitive_interfaces import base, transformer
import common_primitives
import logging
import math
from scipy.fft import dct
from collections import OrderedDict
from typing import cast, Dict, List, Union, Sequence, Optional, Tuple
from scipy import sparse
from numpy import ndarray
# Only the primitive class is part of this module's public API.
__all__ = ('DiscreteCosineTransform',)
# d3m container DataFrame is both the input and output type of this primitive.
Inputs = container.DataFrame
Outputs = container.DataFrame
class Hyperparams(hyperparams.Hyperparams):
    """Hyper-parameters for DiscreteCosineTransform: the first group is
    forwarded verbatim to scipy.fft.dct, the second controls d3m column
    selection and output assembly."""
    # DCT type forwarded to scipy.fft.dct (valid types are 1-4).
    type_ = hyperparams.UniformInt(
        lower=1,
        upper=4,
        upper_inclusive = True,
        default=2,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Type of the DCT. Default is 2",
    )
    # Axis argument forwarded to scipy.fft.dct.
    axis = hyperparams.Hyperparameter[int](
        default=-1,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Axis over which to compute the DCT. If not given, the last axis is used.",
    )
    # Output length n for the transform; 'unlimited' (None) keeps the input length.
    n = hyperparams.Union[Union[int, None]](
        configuration=OrderedDict(
            limit=hyperparams.Bounded[int](
                lower=1,
                upper=None,
                default=10,
            ),
            unlimited=hyperparams.Constant(
                default=None,
                description='If n is not given, the length of the input along the axis specified by axis is used.',
            ),
        ),
        default='unlimited',
        description='Length of the transformed axis of the output. If n is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
    )
    # Normalization mode forwarded to scipy.fft.dct (None or "ortho").
    norm = hyperparams.Enumeration(
        values=[None,"ortho"],
        default=None,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Normalization mode. Default is None, meaning no normalization on the forward transforms and scaling by 1/n on the ifft. For norm=""ortho"", both directions are scaled by 1/sqrt(n).",
    )
    # Whether scipy.fft.dct may destroy its input buffer.
    overwrite_x = hyperparams.UniformBool(
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="If True, the contents of x can be destroyed; the default is False. See the notes below for more details.",
    )
    # Worker count forwarded to scipy.fft.dct; 'unlimited' (None) uses scipy's default.
    workers = hyperparams.Union[Union[float, None]](
        configuration=OrderedDict(
            limit=hyperparams.Bounded[int](
                lower=1,
                upper=None,
                default=10,
            ),
            unlimited=hyperparams.Constant(
                default=None,
                description='If nothing is give as a paramter',
            ),
        ),
        default='unlimited',
        description="Maximum number of workers to use for parallel computation. If negative, the value wraps around from os.cpu_count().",
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
    )
    # parameters for column
    use_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
    )
    exclude_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.",
    )
    return_result = hyperparams.Enumeration(
        values=['append', 'replace', 'new'],
        default='new',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.",
    )
    use_semantic_types = hyperparams.UniformBool(
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe"
    )
    add_index_columns = hyperparams.UniformBool(
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".",
    )
    error_on_no_input = hyperparams.UniformBool(
        default=True,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Throw an exception if no input column is selected/provided. Defaults to true to behave like sklearn. To prevent pipelines from breaking set this to False.",
    )
    return_semantic_type = hyperparams.Enumeration[str](
        values=['https://metadata.datadrivendiscovery.org/types/Attribute',
                'https://metadata.datadrivendiscovery.org/types/ConstructedAttribute'],
        default='https://metadata.datadrivendiscovery.org/types/Attribute',
        description='Decides what semantic type to attach to generated attributes',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
    )
class DCT:
    """Thin wrapper around scipy.fft.dct that applies the transform to every
    column of a DataFrame and returns the coefficients as new columns."""

    def __init__(self, type_, n, axis, overwrite_x, norm, workers):
        # Store the scipy.fft.dct keyword arguments for use in produce().
        self._type = type_
        self._n = n
        self._axis = axis
        self._overwrite_x = overwrite_x
        self._norm = norm
        self._workers = workers

    def produce(self, inputs):
        """Return a new DataFrame with one '<column>_dct_coeff' column per
        input column, containing that column's DCT coefficients.

        Args:
            inputs: DataFrame whose columns hold numeric sequences.
        Returns:
            pandas DataFrame of DCT coefficients (empty on IndexError).
        """
        # Use pd directly, consistent with the pd.Series construction below
        # (the original went through utils.pandas for the same thing).
        processed_df = pd.DataFrame()
        try:
            for target_column in inputs.columns:
                dct_input = inputs[target_column].values
                dct_output = dct(x=dct_input, type=self._type, n=self._n, axis=self._axis,
                                 overwrite_x=self._overwrite_x, norm=self._norm, workers=self._workers)
                processed_df[target_column + "_dct_coeff"] = pd.Series(dct_output)
        except IndexError:
            # Preserve the original best-effort behavior: log and return
            # whatever was built so far.
            logging.warning("Index not found in dataframe")
        return processed_df
class DiscreteCosineTransform(transformer.TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
    """
    Compute the 1-D discrete Cosine Transform.

    Return the Discrete Cosine Transform of arbitrary type sequence x.

    scipy documentation: https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct

    Parameters
    ----------
    type_: int
        Type of the DCT. Default is 2
    n: int
        Length of the transformed axis of the output. If n is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros.
    axis: int
        Axis over which to compute the DCT. If not given, the last axis is used.
    norm: str
        Normalization mode. Default is None, meaning no normalization on the forward transforms and scaling by 1/n on the ifft. For norm="ortho", both directions are scaled by 1/sqrt(n).
    overwrite_x: boolean
        If True, the contents of x can be destroyed; the default is False.
    workers: int
        Maximum number of workers to use for parallel computation. If negative, the value wraps around from os.cpu_count(). Default is None.
    use_columns: Set
        A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.
    exclude_columns: Set
        A set of column indices to not operate on. Applicable only if "use_columns" is not provided.
    return_result: Enumeration
        Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.
    use_semantic_types: Bool
        Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe.
    add_index_columns: Bool
        Also include primary index columns if input data has them. Applicable only if "return_result" is set to "new".
    error_on_no_input: Bool
        Throw an exception if no input column is selected/provided. Defaults to true to behave like sklearn. To prevent pipelines from breaking set this to False.
    return_semantic_type: Enumeration[str]
        Decides what semantic type to attach to generated attributes
    """

    __author__ = "Data Lab"
    metadata = metadata_base.PrimitiveMetadata(
        {
            "__author__ " : "DATA Lab at Texas A&M University",
            'name': "Discrete Cosine Transform",
            'python_path': 'd3m.primitives.tods.feature_analysis.discrete_cosine_transform',
            'source': {
                'name': 'DATA Lab at Texas A&M University',
                'contact': 'mailto:khlai037@tamu.edu',
                'uris': [
                    'https://gitlab.com/lhenry15/tods.git',
                    'https://gitlab.com/lhenry15/tods/-/blob/purav/anomaly-primitives/anomaly_primitives/DiscreteCosineTransform.py',
                ],
            },
            'algorithm_types': [
                metadata_base.PrimitiveAlgorithmType.DISCRETE_COSINE_TRANSFORM,
            ],
            'primitive_family': metadata_base.PrimitiveFamily.FEATURE_CONSTRUCTION,
            'id': '584fa7d5-39cc-4cf8-8d5b-5f3a2648f767',
            'hyperparameters_to_tune': ['n', 'norm', 'axis', 'type_'],
            'version': '0.0.1',
        },
    )

    def __init__(self, *, hyperparams: Hyperparams) -> None:
        super().__init__(hyperparams=hyperparams)
        # Bind the scipy.fft.dct arguments once; DCT is a stateless wrapper.
        self._clf = DCT(type_=self.hyperparams['type_'],
                        n=self.hyperparams['n'],
                        axis=self.hyperparams['axis'],
                        overwrite_x=self.hyperparams['overwrite_x'],
                        norm=self.hyperparams['norm'],
                        workers=self.hyperparams['workers'],
                        )

    def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:
        """
        Args:
            inputs: Container DataFrame
        Returns:
            Container DataFrame added with DCT coefficients in a column named 'column_name_dct_coeff'
        """
        # BUGFIX: the assertion message referenced an undefined name
        # ('dataframe') and would raise NameError instead of reporting the
        # offending type when the assertion fires.
        assert isinstance(inputs, container.DataFrame), type(inputs)

        self._fitted = False
        self._training_inputs, self._training_indices = self._get_columns_to_fit(inputs, self.hyperparams)
        self._input_column_names = self._training_inputs.columns

        if len(self._training_indices) > 0:
            self._fitted = True
        else:
            if self.hyperparams['error_on_no_input']:
                raise RuntimeError("No input columns were selected")
            self.logger.warn("No input columns were selected")

        if not self._fitted:
            # NOTE(review): PrimitiveNotFittedError is not in the visible
            # import block of this file -- confirm it is brought into scope
            # elsewhere, otherwise this line raises NameError.
            raise PrimitiveNotFittedError("Primitive not fitted.")

        sk_inputs = inputs
        if self.hyperparams['use_semantic_types']:
            # Restrict to the selected columns, keeping their original names.
            cols = [inputs.columns[x] for x in self._training_indices]
            sk_inputs = container.DataFrame(data=inputs.iloc[:, self._training_indices].values,
                                            columns=cols, generate_metadata=True)

        output_columns = []
        if len(self._training_indices) > 0:
            sk_output = self._clf.produce(sk_inputs)
            if sparse.issparse(sk_output):
                sk_output = sk_output.toarray()
            outputs = self._wrap_predictions(inputs, sk_output)
            output_columns = [outputs]
        else:
            if self.hyperparams['error_on_no_input']:
                raise RuntimeError("No input columns were selected")
            self.logger.warn("No input columns were selected")

        # Assemble the final frame according to return_result/add_index_columns.
        outputs = base_utils.combine_columns(return_result=self.hyperparams['return_result'],
                                             add_index_columns=self.hyperparams['add_index_columns'],
                                             inputs=inputs, column_indices=self._training_indices,
                                             columns_list=output_columns)
        return base.CallResult(outputs)

    @classmethod
    def _get_columns_to_fit(cls, inputs: Inputs, hyperparams: Hyperparams):
        """
        Select columns to fit.
        Args:
            inputs: Container DataFrame
            hyperparams: d3m.metadata.hyperparams.Hyperparams
        Returns:
            Tuple of (selected columns as a DataFrame, list of their indices)
        """
        # Without semantic-type filtering, every column is used.
        if not hyperparams['use_semantic_types']:
            return inputs, list(range(len(inputs.columns)))

        inputs_metadata = inputs.metadata

        def can_produce_column(column_index: int) -> bool:
            return cls._can_produce_column(inputs_metadata, column_index, hyperparams)

        columns_to_produce, columns_not_to_produce = base_utils.get_columns_to_use(inputs_metadata,
                                                                                   use_columns=hyperparams['use_columns'],
                                                                                   exclude_columns=hyperparams['exclude_columns'],
                                                                                   can_use_column=can_produce_column)
        return inputs.iloc[:, columns_to_produce], columns_to_produce

    @classmethod
    def _can_produce_column(cls, inputs_metadata: metadata_base.DataMetadata, column_index: int,
                            hyperparams: Hyperparams) -> bool:
        """
        Output whether a column can be processed.
        Args:
            inputs_metadata: d3m.metadata.base.DataMetadata
            column_index: int
        Returns:
            bool
        """
        column_metadata = inputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index))

        accepted_structural_types = (int, float, np.integer, np.float64, str)
        if not issubclass(column_metadata['structural_type'], accepted_structural_types):
            print(column_index, "does not match the structural_type requirements in metadata. Skipping column")
            return False

        # NOTE(review): the original implementation short-circuited here
        # ("returning true for testing purposes for custom dataframes"), so
        # every column passing the structural-type check is accepted and
        # semantic-type filtering is effectively disabled. The unreachable
        # 'Attribute' semantic-type checks that followed have been removed;
        # restore them if strict attribute filtering is ever required.
        return True

    @classmethod
    def _get_target_columns_metadata(cls, outputs_metadata: metadata_base.DataMetadata, hyperparams) -> List[OrderedDict]:
        """
        Output metadata of selected columns.
        Args:
            outputs_metadata: metadata_base.DataMetadata
            hyperparams: d3m.metadata.hyperparams.Hyperparams
        Returns:
            List[OrderedDict] of per-column metadata
        """
        outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']

        target_columns_metadata: List[OrderedDict] = []
        for column_index in range(outputs_length):
            column_metadata = OrderedDict(outputs_metadata.query_column(column_index))

            # Update semantic types and prepare it for predicted targets.
            semantic_types = set(column_metadata.get('semantic_types', []))
            semantic_types_to_remove = set()
            # BUGFIX: this was a list, and list has no .add() method -- the
            # original raised AttributeError on the next line. Use a set.
            add_semantic_types = set()
            add_semantic_types.add(hyperparams["return_semantic_type"])
            semantic_types = semantic_types - semantic_types_to_remove
            semantic_types = semantic_types.union(add_semantic_types)
            column_metadata['semantic_types'] = list(semantic_types)

            target_columns_metadata.append(column_metadata)
        return target_columns_metadata

    @classmethod
    def _update_predictions_metadata(cls, inputs_metadata: metadata_base.DataMetadata, outputs: Optional[Outputs],
                                     target_columns_metadata: List[OrderedDict]) -> metadata_base.DataMetadata:
        """
        Update metadata for selected columns.
        Args:
            inputs_metadata: metadata_base.DataMetadata
            outputs: Container Dataframe
            target_columns_metadata: list
        Returns:
            d3m.metadata.base.DataMetadata
        """
        outputs_metadata = metadata_base.DataMetadata().generate(value=outputs)

        for column_index, column_metadata in enumerate(target_columns_metadata):
            # Keep the generated structural type; only overlay the rest.
            column_metadata.pop("structural_type", None)
            outputs_metadata = outputs_metadata.update_column(column_index, column_metadata)

        return outputs_metadata

    def _wrap_predictions(self, inputs: Inputs, predictions: ndarray) -> Outputs:
        """
        Wrap predictions into dataframe
        Args:
            inputs: Container Dataframe
            predictions: array-like data (n_samples, n_features)
        Returns:
            Dataframe
        """
        outputs = container.DataFrame(predictions, generate_metadata=True)
        target_columns_metadata = self._add_target_columns_metadata(outputs.metadata, self.hyperparams)
        outputs.metadata = self._update_predictions_metadata(inputs.metadata, outputs, target_columns_metadata)
        return outputs

    @classmethod
    def _add_target_columns_metadata(cls, outputs_metadata: metadata_base.DataMetadata, hyperparams):
        """
        Add target columns metadata
        Args:
            outputs_metadata: metadata.base.DataMetadata
            hyperparams: d3m.metadata.hyperparams.Hyperparams
        Returns:
            List[OrderedDict]
        """
        outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']

        target_columns_metadata: List[OrderedDict] = []
        for column_index in range(outputs_length):
            # Each generated column only carries the configured semantic type.
            column_metadata = OrderedDict()
            semantic_types = set()
            semantic_types.add(hyperparams["return_semantic_type"])
            column_metadata['semantic_types'] = list(semantic_types)
            target_columns_metadata.append(column_metadata)
        return target_columns_metadata
# NOTE(review): self-assignment is a no-op; presumably left over from a
# pattern that appended generated text to the docstring -- confirm intent.
DiscreteCosineTransform.__doc__ = DiscreteCosineTransform.__doc__
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.filepool_policy_action_create_params import FilepoolPolicyActionCreateParams # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestFilepoolPolicyActionCreateParams(unittest.TestCase):
    """FilepoolPolicyActionCreateParams unit test stubs"""

    def setUp(self):
        # Generated stub: no fixtures required.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testFilepoolPolicyActionCreateParams(self):
        """Test FilepoolPolicyActionCreateParams"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_2_1.models.filepool_policy_action_create_params.FilepoolPolicyActionCreateParams()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
import socket
import urllib.parse
import configargparse
def get_arg_parser_with_db(description):
    """
    Build an ArgumentParser pre-populated with the common logging option and
    the main database connection options. Arguments can also be supplied via
    environment variables (prefixed with `ctf_`).
    """
    arg_parser = configargparse.ArgumentParser(description=description, auto_env_var_prefix='ctf_')
    arg_parser.add_argument('--loglevel', default='WARNING', type=str,
                            choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Log level')

    database_args = arg_parser.add_argument_group('database', 'Gameserver database')
    database_args.add_argument('--dbhost', type=str, help='Hostname of the database. If unspecified, the '
                                                          'default Unix socket will be used.')
    database_args.add_argument('--dbname', type=str, required=True, help='Name of the used database')
    database_args.add_argument('--dbuser', type=str, required=True, help='User name for database access')
    database_args.add_argument('--dbpassword', type=str, help='Password for database access if needed')
    return arg_parser
def parse_host_port(text):
    """
    Parses a host and port specification from a string in the format `<host>:<port>`.

    Args:
        text: the `<host>:<port>` string to parse.

    Returns:
        The parsing result as a tuple of (host, port, family). `family` is a constant from Python's socket
        interface representing an address family, e.g. `socket.AF_INET`.

    Raises:
        ValueError: if host or port cannot be extracted, or the host cannot
            be resolved to an address family.
    """
    # Use pseudo URL for splitting, see https://stackoverflow.com/a/53172593
    url_parts = urllib.parse.urlsplit('//' + text)
    if url_parts.hostname is None or url_parts.port is None:
        raise ValueError('Invalid host or port')
    try:
        addrinfo = socket.getaddrinfo(url_parts.hostname, url_parts.port)
    except socket.gaierror as err:
        # Chain the resolver error so the root cause is visible in tracebacks.
        raise ValueError('Could not determine address family') from err
    # Report the family of the first (preferred) addrinfo entry.
    return (url_parts.hostname, url_parts.port, addrinfo[0][0])
|
"""Smoke-test script: exercises driver/server round-trips for integers,
backslash strings, exponent decimals, string arrays and JSONB.
Expects a connectable database (empty DSN -> libpq defaults) and argv[1] == 3."""
import decimal
import sys

import psycopg2

conn = psycopg2.connect('')
cur = conn.cursor()
# Compute 1, 2 + argv[1] server-side. Pass argv[1] as a bound parameter
# instead of interpolating it into the SQL string (the original used
# str.format, which is injection-prone and bypasses driver quoting).
cur.execute("SELECT 1, 2+%s", (int(sys.argv[1]),))
v = cur.fetchall()
assert v == [(1, 5)]

# Verify #6597 (timestamp format) is fixed.
cur = conn.cursor()
cur.execute("SELECT now()")
v = cur.fetchall()

# Verify round-trip of strings containing backslashes.
# https://github.com/inspurClouddb/inspurClouddb-python/issues/23
s = ('\\\\',)
cur.execute("SELECT %s", s)
v = cur.fetchall()
assert v == [s], (v, s)

# Verify decimals with exponents can be parsed.
cur = conn.cursor()
cur.execute("SELECT 1e1::decimal")
v = cur.fetchall()
d = v[0][0]
assert type(d) is decimal.Decimal
# Use of compare_total here guarantees that we didn't just get '10' back, we got '1e1'.
assert d.compare_total(decimal.Decimal('1e1')) == 0

# Verify arrays with strings can be parsed.
cur = conn.cursor()
cur.execute("SELECT ARRAY['foo','bar','baz']")
v = cur.fetchall()
d = v[0][0]
assert d == ["foo", "bar", "baz"]

# Verify JSON values come through properly.
cur = conn.cursor()
cur.execute("SELECT '{\"a\":\"b\"}'::JSONB")
v = cur.fetchall()
d = v[0][0]
assert d == {"a": "b"}
|
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from helpers.utils import to_data, expand_dims, \
int_type, float_type, long_type, add_weight_norm
from helpers.layers import build_conv_encoder, build_dense_encoder
class RelationalNetwork(nn.Module):
    def __init__(self, hidden_size, output_size, config):
        ''' Relational network that takes a list of imgs, projects
            each with a conv and then take each i-j tuple and runs an RN
            over it. Finally, the model returns a DNN over the summed
            values of the RN.

        Args:
            hidden_size: width of the image/relation projections.
            output_size: size of the final output projection.
            config: dict read for 'layer_type', 'normalization' and 'cuda'.
        '''
        super(RelationalNetwork, self).__init__()
        self.config = config
        self.hidden_size = hidden_size
        self.output_size = output_size

        # build the projector and the rn models
        self.proj = self._build_proj_model()
        self.rn = self._build_rn_model()

    def _build_proj_model(self):
        # Final DNN applied to the aggregated relation vectors.
        return build_dense_encoder(self.hidden_size, self.output_size,
                                   normalization_str='batchnorm')  # NOTE: hard-coded, ignores self.config['normalization']

    def _lazy_build_image_model(self, input_size):
        # BUGFIX: the original guard tested hasattr(self, 'conv') but the
        # attribute assigned below is self.img_model, so the encoder was
        # rebuilt -- with freshly initialized weights -- on every call.
        # Guard on the attribute that is actually assigned.
        if not hasattr(self, 'img_model'):
            builder_fn = build_dense_encoder \
                if self.config['layer_type'] == 'dense' else build_conv_encoder
            self.img_model = nn.Sequential(
                builder_fn(input_size, self.hidden_size,
                           normalization_str=self.config['normalization']),
                nn.SELU())
            if self.config['cuda']:
                self.img_model = self.img_model.cuda()

        return self.img_model

    def _build_rn_model(self):
        # Pairwise relation function g(x_i, x_j) over concatenated features.
        return nn.Sequential(
            build_dense_encoder(self.hidden_size*2, self.hidden_size,
                                normalization_str='batchnorm'),  # NOTE: hard-coded, ignores self.config['normalization']
            nn.SELU())

    def forward(self, imgs):
        """Run the relational network over a list of image tensors and return
        the projected, mean-aggregated relation vector."""
        assert isinstance(imgs, list), "need a set of input images"

        # get all the conv outputs
        self._lazy_build_image_model(list(imgs[0].size())[1:])
        conv_output = [self.img_model(img) for img in imgs]

        # project each x_i : x_j tuple through the RN
        rn_buffer = [self.rn(torch.cat([img_r, img_l], -1)).unsqueeze(0)
                     for img_r in conv_output
                     for img_l in conv_output]
        rn_buffer = torch.mean(torch.cat(rn_buffer, 0), 0)

        # return the summed buffer projected through a DNN
        return self.proj(rn_buffer)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: spaceone/api/monitoring/v1/escalation_policy.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from spaceone.api.core.v1 import query_pb2 as spaceone_dot_api_dot_core_dot_v1_dot_query__pb2
# File descriptor for escalation_policy.proto: holds the full serialized
# schema (serialized_pb) that every message/enum descriptor below indexes
# into via serialized_start/serialized_end byte offsets.
# NOTE(review): this is protoc-generated code ("DO NOT EDIT"); the
# serialized_pb bytes literal below appears to have been hard-wrapped
# across physical lines by an extraction step (in the generated file it
# is a single-line literal). Regenerate with protoc rather than
# hand-editing -- confirm before relying on this file as-is.
DESCRIPTOR = _descriptor.FileDescriptor(
name='spaceone/api/monitoring/v1/escalation_policy.proto',
package='spaceone.api.monitoring.v1',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n2spaceone/api/monitoring/v1/escalation_policy.proto\x12\x1aspaceone.api.monitoring.v1\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto\x1a spaceone/api/core/v1/query.proto\"\xe5\x01\n\x14\x45scalationPolicyRule\x12^\n\x12notification_level\x18\x01 \x01(\x0e\x32\x42.spaceone.api.monitoring.v1.EscalationPolicyRule.NotificationLevel\x12\x18\n\x10\x65scalate_minutes\x18\x02 \x01(\x05\"S\n\x11NotificationLevel\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\x07\n\x03LV1\x10\x02\x12\x07\n\x03LV2\x10\x03\x12\x07\n\x03LV3\x10\x04\x12\x07\n\x03LV4\x10\x05\x12\x07\n\x03LV5\x10\x06\"\x88\x03\n\x1d\x43reateEscalationPolicyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12?\n\x05rules\x18\x02 \x03(\x0b\x32\x30.spaceone.api.monitoring.v1.EscalationPolicyRule\x12\x14\n\x0crepeat_count\x18\x03 \x01(\x05\x12m\n\x10\x66inish_condition\x18\x04 \x01(\x0e\x32S.spaceone.api.monitoring.v1.CreateEscalationPolicyRequest.EscalationFinishCondition\x12\x12\n\nproject_id\x18\x05 \x01(\t\x12%\n\x04tags\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdomain_id\x18\x0b \x01(\t\"E\n\x19\x45scalationFinishCondition\x12\x08\n\x04NONE\x10\x00\x12\x10\n\x0c\x41\x43KNOWLEDGED\x10\x01\x12\x0c\n\x08RESOLVED\x10\x02\"\x92\x03\n\x1dUpdateEscalationPolicyRequest\x12\x1c\n\x14\x65scalation_policy_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12?\n\x05rules\x18\x03 \x03(\x0b\x32\x30.spaceone.api.monitoring.v1.EscalationPolicyRule\x12\x14\n\x0crepeat_count\x18\x04 \x01(\x05\x12m\n\x10\x66inish_condition\x18\x05 \x01(\x0e\x32S.spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest.EscalationFinishCondition\x12%\n\x04tags\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdomain_id\x18\x0b 
\x01(\t\"E\n\x19\x45scalationFinishCondition\x12\x08\n\x04NONE\x10\x00\x12\x10\n\x0c\x41\x43KNOWLEDGED\x10\x01\x12\x0c\n\x08RESOLVED\x10\x02\"J\n\x17\x45scalationPolicyRequest\x12\x1c\n\x14\x65scalation_policy_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\"[\n\x1aGetEscalationPolicyRequest\x12\x1c\n\x14\x65scalation_policy_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\x12\x0c\n\x04only\x18\x03 \x03(\t\"\xf2\x03\n\x15\x45scalationPolicyQuery\x12*\n\x05query\x18\x01 \x01(\x0b\x32\x1b.spaceone.api.core.v1.Query\x12\x1c\n\x14\x65scalation_policy_id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x12\n\nis_default\x18\x04 \x01(\x08\x12\x65\n\x10\x66inish_condition\x18\x05 \x01(\x0e\x32K.spaceone.api.monitoring.v1.EscalationPolicyQuery.EscalationFinishCondition\x12V\n\x05scope\x18\x06 \x01(\x0e\x32G.spaceone.api.monitoring.v1.EscalationPolicyQuery.EscalationPolicyScope\x12\x12\n\nproject_id\x18\x07 \x01(\t\x12\x11\n\tdomain_id\x18\x0b \x01(\t\"@\n\x15\x45scalationPolicyScope\x12\x0e\n\nSCOPE_NONE\x10\x00\x12\n\n\x06GLOBAL\x10\x01\x12\x0b\n\x07PROJECT\x10\x02\"E\n\x19\x45scalationFinishCondition\x12\x08\n\x04NONE\x10\x00\x12\x10\n\x0c\x41\x43KNOWLEDGED\x10\x01\x12\x0c\n\x08RESOLVED\x10\x02\"\xd5\x04\n\x14\x45scalationPolicyInfo\x12\x1c\n\x14\x65scalation_policy_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x12\n\nis_default\x18\x03 \x01(\x08\x12?\n\x05rules\x18\x04 \x03(\x0b\x32\x30.spaceone.api.monitoring.v1.EscalationPolicyRule\x12\x14\n\x0crepeat_count\x18\x05 \x01(\x05\x12\x64\n\x10\x66inish_condition\x18\x06 \x01(\x0e\x32J.spaceone.api.monitoring.v1.EscalationPolicyInfo.EscalationFinishCondition\x12U\n\x05scope\x18\x07 \x01(\x0e\x32\x46.spaceone.api.monitoring.v1.EscalationPolicyInfo.EscalationPolicyScope\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12%\n\x04tags\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdomain_id\x18\x0b \x01(\t\x12\x12\n\ncreated_at\x18\x15 
\x01(\t\"@\n\x15\x45scalationPolicyScope\x12\x0e\n\nSCOPE_NONE\x10\x00\x12\n\n\x06GLOBAL\x10\x01\x12\x0b\n\x07PROJECT\x10\x02\"E\n\x19\x45scalationFinishCondition\x12\x08\n\x04NONE\x10\x00\x12\x10\n\x0c\x41\x43KNOWLEDGED\x10\x01\x12\x0c\n\x08RESOLVED\x10\x02\"p\n\x16\x45scalationPoliciesInfo\x12\x41\n\x07results\x18\x01 \x03(\x0b\x32\x30.spaceone.api.monitoring.v1.EscalationPolicyInfo\x12\x13\n\x0btotal_count\x18\x02 \x01(\x05\"d\n\x19\x45scalationPolicyStatQuery\x12\x34\n\x05query\x18\x01 \x01(\x0b\x32%.spaceone.api.core.v1.StatisticsQuery\x12\x11\n\tdomain_id\x18\x02 \x01(\t2\xd2\t\n\x10\x45scalationPolicy\x12\xa1\x01\n\x06\x63reate\x12\x39.spaceone.api.monitoring.v1.CreateEscalationPolicyRequest\x1a\x30.spaceone.api.monitoring.v1.EscalationPolicyInfo\"*\x82\xd3\xe4\x93\x02$\"\"/monitoring/v1/escalation-policies\x12\xb6\x01\n\x06update\x12\x39.spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest\x1a\x30.spaceone.api.monitoring.v1.EscalationPolicyInfo\"?\x82\xd3\xe4\x93\x02\x39\x1a\x37/monitoring/v1/escalation-policy/{escalation_policy_id}\x12\xc1\x01\n\x0bset_default\x12\x33.spaceone.api.monitoring.v1.EscalationPolicyRequest\x1a\x30.spaceone.api.monitoring.v1.EscalationPolicyInfo\"K\x82\xd3\xe4\x93\x02\x45\x1a\x43/monitoring/v1/escalation-policy/{escalation_policy_id}/set-default\x12\x96\x01\n\x06\x64\x65lete\x12\x33.spaceone.api.monitoring.v1.EscalationPolicyRequest\x1a\x16.google.protobuf.Empty\"?\x82\xd3\xe4\x93\x02\x39*7/monitoring/v1/escalation-policy/{escalation_policy_id}\x12\xb0\x01\n\x03get\x12\x36.spaceone.api.monitoring.v1.GetEscalationPolicyRequest\x1a\x30.spaceone.api.monitoring.v1.EscalationPolicyInfo\"?\x82\xd3\xe4\x93\x02\x39\x12\x37/monitoring/v1/escalation-policy/{escalation_policy_id}\x12\xc6\x01\n\x04list\x12\x31.spaceone.api.monitoring.v1.EscalationPolicyQuery\x1a\x32.spaceone.api.monitoring.v1.EscalationPoliciesInfo\"W\x82\xd3\xe4\x93\x02Q\x12\"/monitoring/v1/escalation-policiesZ+\")/monitoring/v1/escalation-policies/search\x12\x87\x01\n\
x04stat\x12\x35.spaceone.api.monitoring.v1.EscalationPolicyStatQuery\x1a\x17.google.protobuf.Struct\"/\x82\xd3\xe4\x93\x02)\"\'/monitoring/v1/escalation-policies/statb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,spaceone_dot_api_dot_core_dot_v1_dot_query__pb2.DESCRIPTOR,])
# Generated descriptor for the nested enum
# EscalationPolicyRule.NotificationLevel (NONE, ALL, LV1..LV5); registered
# with the default symbol database below. Do not hand-edit.
_ESCALATIONPOLICYRULE_NOTIFICATIONLEVEL = _descriptor.EnumDescriptor(
name='NotificationLevel',
full_name='spaceone.api.monitoring.v1.EscalationPolicyRule.NotificationLevel',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ALL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LV1', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LV2', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LV3', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LV4', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LV5', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=352,
serialized_end=435,
)
_sym_db.RegisterEnumDescriptor(_ESCALATIONPOLICYRULE_NOTIFICATIONLEVEL)
# Generated descriptor for the nested enum
# CreateEscalationPolicyRequest.EscalationFinishCondition
# (NONE, ACKNOWLEDGED, RESOLVED). Do not hand-edit.
_CREATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION = _descriptor.EnumDescriptor(
name='EscalationFinishCondition',
full_name='spaceone.api.monitoring.v1.CreateEscalationPolicyRequest.EscalationFinishCondition',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACKNOWLEDGED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESOLVED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=761,
serialized_end=830,
)
_sym_db.RegisterEnumDescriptor(_CREATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION)
# Generated descriptor for the nested enum
# UpdateEscalationPolicyRequest.EscalationFinishCondition
# (same values as the Create variant). Do not hand-edit.
_UPDATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION = _descriptor.EnumDescriptor(
name='EscalationFinishCondition',
full_name='spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest.EscalationFinishCondition',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACKNOWLEDGED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESOLVED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=761,
serialized_end=830,
)
_sym_db.RegisterEnumDescriptor(_UPDATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION)
# Generated descriptor for the nested enum
# EscalationPolicyQuery.EscalationPolicyScope (SCOPE_NONE, GLOBAL, PROJECT).
# Do not hand-edit.
_ESCALATIONPOLICYQUERY_ESCALATIONPOLICYSCOPE = _descriptor.EnumDescriptor(
name='EscalationPolicyScope',
full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.EscalationPolicyScope',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SCOPE_NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GLOBAL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROJECT', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1770,
serialized_end=1834,
)
_sym_db.RegisterEnumDescriptor(_ESCALATIONPOLICYQUERY_ESCALATIONPOLICYSCOPE)
# Generated descriptor for the nested enum
# EscalationPolicyQuery.EscalationFinishCondition. Do not hand-edit.
_ESCALATIONPOLICYQUERY_ESCALATIONFINISHCONDITION = _descriptor.EnumDescriptor(
name='EscalationFinishCondition',
full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.EscalationFinishCondition',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACKNOWLEDGED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESOLVED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=761,
serialized_end=830,
)
_sym_db.RegisterEnumDescriptor(_ESCALATIONPOLICYQUERY_ESCALATIONFINISHCONDITION)
# Generated descriptor for the nested enum
# EscalationPolicyInfo.EscalationPolicyScope (SCOPE_NONE, GLOBAL, PROJECT).
# Do not hand-edit.
_ESCALATIONPOLICYINFO_ESCALATIONPOLICYSCOPE = _descriptor.EnumDescriptor(
name='EscalationPolicyScope',
full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.EscalationPolicyScope',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SCOPE_NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GLOBAL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROJECT', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1770,
serialized_end=1834,
)
_sym_db.RegisterEnumDescriptor(_ESCALATIONPOLICYINFO_ESCALATIONPOLICYSCOPE)
# Generated descriptor for the nested enum
# EscalationPolicyInfo.EscalationFinishCondition. Do not hand-edit.
_ESCALATIONPOLICYINFO_ESCALATIONFINISHCONDITION = _descriptor.EnumDescriptor(
name='EscalationFinishCondition',
full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.EscalationFinishCondition',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACKNOWLEDGED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESOLVED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=761,
serialized_end=830,
)
_sym_db.RegisterEnumDescriptor(_ESCALATIONPOLICYINFO_ESCALATIONFINISHCONDITION)
# Generated message descriptor for EscalationPolicyRule
# (fields: notification_level enum, escalate_minutes int32). Do not hand-edit.
_ESCALATIONPOLICYRULE = _descriptor.Descriptor(
name='EscalationPolicyRule',
full_name='spaceone.api.monitoring.v1.EscalationPolicyRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='notification_level', full_name='spaceone.api.monitoring.v1.EscalationPolicyRule.notification_level', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='escalate_minutes', full_name='spaceone.api.monitoring.v1.EscalationPolicyRule.escalate_minutes', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_ESCALATIONPOLICYRULE_NOTIFICATIONLEVEL,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=206,
serialized_end=435,
)
# Generated message descriptor for CreateEscalationPolicyRequest
# (name, rules, repeat_count, finish_condition, project_id, tags, domain_id).
# Do not hand-edit.
_CREATEESCALATIONPOLICYREQUEST = _descriptor.Descriptor(
name='CreateEscalationPolicyRequest',
full_name='spaceone.api.monitoring.v1.CreateEscalationPolicyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.monitoring.v1.CreateEscalationPolicyRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rules', full_name='spaceone.api.monitoring.v1.CreateEscalationPolicyRequest.rules', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repeat_count', full_name='spaceone.api.monitoring.v1.CreateEscalationPolicyRequest.repeat_count', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='finish_condition', full_name='spaceone.api.monitoring.v1.CreateEscalationPolicyRequest.finish_condition', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.CreateEscalationPolicyRequest.project_id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.monitoring.v1.CreateEscalationPolicyRequest.tags', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.CreateEscalationPolicyRequest.domain_id', index=6,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_CREATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=438,
serialized_end=830,
)
# Generated message descriptor for UpdateEscalationPolicyRequest
# (escalation_policy_id, name, rules, repeat_count, finish_condition,
# tags, domain_id). Do not hand-edit.
_UPDATEESCALATIONPOLICYREQUEST = _descriptor.Descriptor(
name='UpdateEscalationPolicyRequest',
full_name='spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='escalation_policy_id', full_name='spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest.escalation_policy_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rules', full_name='spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest.rules', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repeat_count', full_name='spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest.repeat_count', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='finish_condition', full_name='spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest.finish_condition', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest.tags', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest.domain_id', index=6,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_UPDATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=833,
serialized_end=1235,
)
# Generated message descriptor for EscalationPolicyRequest
# (escalation_policy_id, domain_id). Do not hand-edit.
_ESCALATIONPOLICYREQUEST = _descriptor.Descriptor(
name='EscalationPolicyRequest',
full_name='spaceone.api.monitoring.v1.EscalationPolicyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='escalation_policy_id', full_name='spaceone.api.monitoring.v1.EscalationPolicyRequest.escalation_policy_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.EscalationPolicyRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1237,
serialized_end=1311,
)
# Generated message descriptor for GetEscalationPolicyRequest
# (escalation_policy_id, domain_id, repeated 'only' field mask).
# Do not hand-edit.
_GETESCALATIONPOLICYREQUEST = _descriptor.Descriptor(
name='GetEscalationPolicyRequest',
full_name='spaceone.api.monitoring.v1.GetEscalationPolicyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='escalation_policy_id', full_name='spaceone.api.monitoring.v1.GetEscalationPolicyRequest.escalation_policy_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.GetEscalationPolicyRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='only', full_name='spaceone.api.monitoring.v1.GetEscalationPolicyRequest.only', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1313,
serialized_end=1404,
)
# Generated message descriptor for EscalationPolicyQuery (search/list filter:
# query, escalation_policy_id, name, is_default, finish_condition, scope,
# project_id, domain_id). Do not hand-edit.
_ESCALATIONPOLICYQUERY = _descriptor.Descriptor(
name='EscalationPolicyQuery',
full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='escalation_policy_id', full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.escalation_policy_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_default', full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.is_default', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='finish_condition', full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.finish_condition', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scope', full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.scope', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.project_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.EscalationPolicyQuery.domain_id', index=7,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_ESCALATIONPOLICYQUERY_ESCALATIONPOLICYSCOPE,
_ESCALATIONPOLICYQUERY_ESCALATIONFINISHCONDITION,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1407,
serialized_end=1905,
)
# Generated message descriptor for EscalationPolicyInfo (the response shape:
# escalation_policy_id, name, is_default, rules, repeat_count,
# finish_condition, scope, project_id, tags, domain_id, created_at).
# Do not hand-edit.
_ESCALATIONPOLICYINFO = _descriptor.Descriptor(
name='EscalationPolicyInfo',
full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='escalation_policy_id', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.escalation_policy_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_default', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.is_default', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rules', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.rules', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repeat_count', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.repeat_count', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='finish_condition', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.finish_condition', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scope', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.scope', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.project_id', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.tags', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.domain_id', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_at', full_name='spaceone.api.monitoring.v1.EscalationPolicyInfo.created_at', index=10,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_ESCALATIONPOLICYINFO_ESCALATIONPOLICYSCOPE,
_ESCALATIONPOLICYINFO_ESCALATIONFINISHCONDITION,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1908,
serialized_end=2505,
)
# Generated protocol buffer code (protoc output): message descriptor for
# EscalationPoliciesInfo (a paged list: repeated results + total_count).
# NOTE(review): do not edit by hand — regenerate from the .proto file instead.
_ESCALATIONPOLICIESINFO = _descriptor.Descriptor(
  name='EscalationPoliciesInfo',
  full_name='spaceone.api.monitoring.v1.EscalationPoliciesInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='results', full_name='spaceone.api.monitoring.v1.EscalationPoliciesInfo.results', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='total_count', full_name='spaceone.api.monitoring.v1.EscalationPoliciesInfo.total_count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2507,
  serialized_end=2619,
)
# Generated message descriptor for EscalationPolicyStatQuery
# (a statistics query plus the domain it applies to).
_ESCALATIONPOLICYSTATQUERY = _descriptor.Descriptor(
  name='EscalationPolicyStatQuery',
  full_name='spaceone.api.monitoring.v1.EscalationPolicyStatQuery',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='query', full_name='spaceone.api.monitoring.v1.EscalationPolicyStatQuery.query', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='domain_id', full_name='spaceone.api.monitoring.v1.EscalationPolicyStatQuery.domain_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2621,
  serialized_end=2721,
)
# Generated protoc output: wire up the message/enum cross-references that could
# not be resolved while the descriptors above were being constructed.
# NOTE(review): do not edit by hand — regenerate from the .proto file instead.
_ESCALATIONPOLICYRULE.fields_by_name['notification_level'].enum_type = _ESCALATIONPOLICYRULE_NOTIFICATIONLEVEL
_ESCALATIONPOLICYRULE_NOTIFICATIONLEVEL.containing_type = _ESCALATIONPOLICYRULE
_CREATEESCALATIONPOLICYREQUEST.fields_by_name['rules'].message_type = _ESCALATIONPOLICYRULE
_CREATEESCALATIONPOLICYREQUEST.fields_by_name['finish_condition'].enum_type = _CREATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION
_CREATEESCALATIONPOLICYREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_CREATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION.containing_type = _CREATEESCALATIONPOLICYREQUEST
_UPDATEESCALATIONPOLICYREQUEST.fields_by_name['rules'].message_type = _ESCALATIONPOLICYRULE
_UPDATEESCALATIONPOLICYREQUEST.fields_by_name['finish_condition'].enum_type = _UPDATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION
_UPDATEESCALATIONPOLICYREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_UPDATEESCALATIONPOLICYREQUEST_ESCALATIONFINISHCONDITION.containing_type = _UPDATEESCALATIONPOLICYREQUEST
_ESCALATIONPOLICYQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._QUERY
_ESCALATIONPOLICYQUERY.fields_by_name['finish_condition'].enum_type = _ESCALATIONPOLICYQUERY_ESCALATIONFINISHCONDITION
_ESCALATIONPOLICYQUERY.fields_by_name['scope'].enum_type = _ESCALATIONPOLICYQUERY_ESCALATIONPOLICYSCOPE
_ESCALATIONPOLICYQUERY_ESCALATIONPOLICYSCOPE.containing_type = _ESCALATIONPOLICYQUERY
_ESCALATIONPOLICYQUERY_ESCALATIONFINISHCONDITION.containing_type = _ESCALATIONPOLICYQUERY
_ESCALATIONPOLICYINFO.fields_by_name['rules'].message_type = _ESCALATIONPOLICYRULE
_ESCALATIONPOLICYINFO.fields_by_name['finish_condition'].enum_type = _ESCALATIONPOLICYINFO_ESCALATIONFINISHCONDITION
_ESCALATIONPOLICYINFO.fields_by_name['scope'].enum_type = _ESCALATIONPOLICYINFO_ESCALATIONPOLICYSCOPE
_ESCALATIONPOLICYINFO.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_ESCALATIONPOLICYINFO_ESCALATIONPOLICYSCOPE.containing_type = _ESCALATIONPOLICYINFO
_ESCALATIONPOLICYINFO_ESCALATIONFINISHCONDITION.containing_type = _ESCALATIONPOLICYINFO
_ESCALATIONPOLICIESINFO.fields_by_name['results'].message_type = _ESCALATIONPOLICYINFO
_ESCALATIONPOLICYSTATQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._STATISTICSQUERY
# Register every message descriptor of this file with the symbol database.
DESCRIPTOR.message_types_by_name['EscalationPolicyRule'] = _ESCALATIONPOLICYRULE
DESCRIPTOR.message_types_by_name['CreateEscalationPolicyRequest'] = _CREATEESCALATIONPOLICYREQUEST
DESCRIPTOR.message_types_by_name['UpdateEscalationPolicyRequest'] = _UPDATEESCALATIONPOLICYREQUEST
DESCRIPTOR.message_types_by_name['EscalationPolicyRequest'] = _ESCALATIONPOLICYREQUEST
DESCRIPTOR.message_types_by_name['GetEscalationPolicyRequest'] = _GETESCALATIONPOLICYREQUEST
DESCRIPTOR.message_types_by_name['EscalationPolicyQuery'] = _ESCALATIONPOLICYQUERY
DESCRIPTOR.message_types_by_name['EscalationPolicyInfo'] = _ESCALATIONPOLICYINFO
DESCRIPTOR.message_types_by_name['EscalationPoliciesInfo'] = _ESCALATIONPOLICIESINFO
DESCRIPTOR.message_types_by_name['EscalationPolicyStatQuery'] = _ESCALATIONPOLICYSTATQUERY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes from their descriptors via the
# GeneratedProtocolMessageType metaclass and register them.
EscalationPolicyRule = _reflection.GeneratedProtocolMessageType('EscalationPolicyRule', (_message.Message,), {
  'DESCRIPTOR' : _ESCALATIONPOLICYRULE,
  '__module__' : 'spaceone.api.monitoring.v1.escalation_policy_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.EscalationPolicyRule)
  })
_sym_db.RegisterMessage(EscalationPolicyRule)
CreateEscalationPolicyRequest = _reflection.GeneratedProtocolMessageType('CreateEscalationPolicyRequest', (_message.Message,), {
  'DESCRIPTOR' : _CREATEESCALATIONPOLICYREQUEST,
  '__module__' : 'spaceone.api.monitoring.v1.escalation_policy_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.CreateEscalationPolicyRequest)
  })
_sym_db.RegisterMessage(CreateEscalationPolicyRequest)
UpdateEscalationPolicyRequest = _reflection.GeneratedProtocolMessageType('UpdateEscalationPolicyRequest', (_message.Message,), {
  'DESCRIPTOR' : _UPDATEESCALATIONPOLICYREQUEST,
  '__module__' : 'spaceone.api.monitoring.v1.escalation_policy_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.UpdateEscalationPolicyRequest)
  })
_sym_db.RegisterMessage(UpdateEscalationPolicyRequest)
EscalationPolicyRequest = _reflection.GeneratedProtocolMessageType('EscalationPolicyRequest', (_message.Message,), {
  'DESCRIPTOR' : _ESCALATIONPOLICYREQUEST,
  '__module__' : 'spaceone.api.monitoring.v1.escalation_policy_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.EscalationPolicyRequest)
  })
_sym_db.RegisterMessage(EscalationPolicyRequest)
GetEscalationPolicyRequest = _reflection.GeneratedProtocolMessageType('GetEscalationPolicyRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETESCALATIONPOLICYREQUEST,
  '__module__' : 'spaceone.api.monitoring.v1.escalation_policy_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.GetEscalationPolicyRequest)
  })
_sym_db.RegisterMessage(GetEscalationPolicyRequest)
EscalationPolicyQuery = _reflection.GeneratedProtocolMessageType('EscalationPolicyQuery', (_message.Message,), {
  'DESCRIPTOR' : _ESCALATIONPOLICYQUERY,
  '__module__' : 'spaceone.api.monitoring.v1.escalation_policy_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.EscalationPolicyQuery)
  })
_sym_db.RegisterMessage(EscalationPolicyQuery)
EscalationPolicyInfo = _reflection.GeneratedProtocolMessageType('EscalationPolicyInfo', (_message.Message,), {
  'DESCRIPTOR' : _ESCALATIONPOLICYINFO,
  '__module__' : 'spaceone.api.monitoring.v1.escalation_policy_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.EscalationPolicyInfo)
  })
_sym_db.RegisterMessage(EscalationPolicyInfo)
EscalationPoliciesInfo = _reflection.GeneratedProtocolMessageType('EscalationPoliciesInfo', (_message.Message,), {
  'DESCRIPTOR' : _ESCALATIONPOLICIESINFO,
  '__module__' : 'spaceone.api.monitoring.v1.escalation_policy_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.EscalationPoliciesInfo)
  })
_sym_db.RegisterMessage(EscalationPoliciesInfo)
EscalationPolicyStatQuery = _reflection.GeneratedProtocolMessageType('EscalationPolicyStatQuery', (_message.Message,), {
  'DESCRIPTOR' : _ESCALATIONPOLICYSTATQUERY,
  '__module__' : 'spaceone.api.monitoring.v1.escalation_policy_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.EscalationPolicyStatQuery)
  })
_sym_db.RegisterMessage(EscalationPolicyStatQuery)
# Generated service descriptor for the EscalationPolicy gRPC service.
# Each method's serialized_options encodes its google.api.http REST binding.
# NOTE(review): do not edit by hand — regenerate from the .proto file instead.
_ESCALATIONPOLICY = _descriptor.ServiceDescriptor(
  name='EscalationPolicy',
  full_name='spaceone.api.monitoring.v1.EscalationPolicy',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=2724,
  serialized_end=3958,
  methods=[
  _descriptor.MethodDescriptor(
    name='create',
    full_name='spaceone.api.monitoring.v1.EscalationPolicy.create',
    index=0,
    containing_service=None,
    input_type=_CREATEESCALATIONPOLICYREQUEST,
    output_type=_ESCALATIONPOLICYINFO,
    serialized_options=b'\202\323\344\223\002$\"\"/monitoring/v1/escalation-policies',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='update',
    full_name='spaceone.api.monitoring.v1.EscalationPolicy.update',
    index=1,
    containing_service=None,
    input_type=_UPDATEESCALATIONPOLICYREQUEST,
    output_type=_ESCALATIONPOLICYINFO,
    serialized_options=b'\202\323\344\223\0029\0327/monitoring/v1/escalation-policy/{escalation_policy_id}',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='set_default',
    full_name='spaceone.api.monitoring.v1.EscalationPolicy.set_default',
    index=2,
    containing_service=None,
    input_type=_ESCALATIONPOLICYREQUEST,
    output_type=_ESCALATIONPOLICYINFO,
    serialized_options=b'\202\323\344\223\002E\032C/monitoring/v1/escalation-policy/{escalation_policy_id}/set-default',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='delete',
    full_name='spaceone.api.monitoring.v1.EscalationPolicy.delete',
    index=3,
    containing_service=None,
    input_type=_ESCALATIONPOLICYREQUEST,
    output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
    serialized_options=b'\202\323\344\223\0029*7/monitoring/v1/escalation-policy/{escalation_policy_id}',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='get',
    full_name='spaceone.api.monitoring.v1.EscalationPolicy.get',
    index=4,
    containing_service=None,
    input_type=_GETESCALATIONPOLICYREQUEST,
    output_type=_ESCALATIONPOLICYINFO,
    serialized_options=b'\202\323\344\223\0029\0227/monitoring/v1/escalation-policy/{escalation_policy_id}',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='list',
    full_name='spaceone.api.monitoring.v1.EscalationPolicy.list',
    index=5,
    containing_service=None,
    input_type=_ESCALATIONPOLICYQUERY,
    output_type=_ESCALATIONPOLICIESINFO,
    serialized_options=b'\202\323\344\223\002Q\022\"/monitoring/v1/escalation-policiesZ+\")/monitoring/v1/escalation-policies/search',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='stat',
    full_name='spaceone.api.monitoring.v1.EscalationPolicy.stat',
    index=6,
    containing_service=None,
    input_type=_ESCALATIONPOLICYSTATQUERY,
    output_type=google_dot_protobuf_dot_struct__pb2._STRUCT,
    serialized_options=b'\202\323\344\223\002)\"\'/monitoring/v1/escalation-policies/stat',
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_ESCALATIONPOLICY)
DESCRIPTOR.services_by_name['EscalationPolicy'] = _ESCALATIONPOLICY
# @@protoc_insertion_point(module_scope)
|
from django.urls import path
from game.views import EmailFormView
# URL routing: map the email form page to its class-based view.
# The route is reversible via the name 'email_form'.
urlpatterns = [
    path('emailform/', EmailFormView.as_view(), name='email_form'),
]
|
"""Import modules."""
import math
import os
import sys
import struct
import numpy
try:
from PIL import Image
except ImportError as e:
if sys.platform == 'linux2':
sys.stderr.write("PIL module not found, please install it with:\n")
sys.stderr.write("apt-get install python-pip\n")
sys.stderr.write("pip install pillow\n")
raise e
import numbers
from urdf2webots.gazebo_materials import materials
from urdf2webots.math_utils import convertRPYtoEulerAxis
try:
from collada import Collada, lineset
colladaIsAvailable = True
except ImportError:
colladaIsAvailable = False
counter = 0  # module-level counter (its consumers are outside this chunk)
# Module-level settings, meant to be assigned by the caller before parsing:
robotName = ''  # robot name; used to build the '<robotName>_textures' output folder
disableMeshOptimization = False  # when True, getSTLMesh skips duplicate-vertex merging
class Trimesh():
    """Container for an indexed triangular mesh."""
    def __init__(self):
        """Start with no geometry: six empty, independent lists."""
        # Vertex-level data and the per-face index lists that reference it:
        # coordinates, texture coordinates and normals.
        (self.coord, self.coordIndex,
         self.texCoord, self.texCoordIndex,
         self.normal, self.normalIndex) = ([], [], [], [], [], [])
class Inertia():
    """Mass, pose and inertia-matrix coefficients of a link."""
    def __init__(self):
        """Default to a unit diagonal inertia with an unknown mass."""
        self.position = [0.0, 0.0, 0.0]
        self.rotation = [1.0, 0.0, 0.0, 0.0]
        self.mass = None  # unknown until parsed from the URDF
        # Coefficients of the symmetric 3x3 inertia matrix.
        self.ixx, self.iyy, self.izz = 1.0, 1.0, 1.0
        self.ixy = self.ixz = self.iyz = 0.0
class Box():
    """Box geometry primitive."""
    def __init__(self):
        """All three dimensions start at zero."""
        self.x = self.y = self.z = 0.0
class Cylinder():
    """Cylinder geometry primitive."""
    def __init__(self):
        """Radius and length both start at zero."""
        self.radius = self.length = 0.0
class Sphere():
    """Sphere geometry primitive."""
    def __init__(self):
        """The radius starts at zero."""
        self.radius = 0.0
class Geometry():
    """Geometry of a visual/collision element: primitives plus an optional mesh."""
    # Class-level cache of geometries shared by all instances, keyed by name.
    reference = {}
    def __init__(self):
        """Create one empty instance of every supported primitive."""
        self.box, self.cylinder = Box(), Cylinder()
        self.sphere, self.trimesh = Sphere(), Trimesh()
        self.scale = [1.0, 1.0, 1.0]
        self.name = None
        self.defName = None
        self.lineset = False
class Color():
    """RGBA color; each channel is stored as given (typically in [0, 1])."""
    def __init__(self, red=0.5, green=0.0, blue=0.0, alpha=1.0):
        """Store the four channel values."""
        self.red, self.green, self.blue, self.alpha = red, green, blue, alpha
class Material():
    """Material (color, texture) of a visual element."""
    # Registry shared by all instances: maps a material 'name' attribute to the
    # Material instance that first defined it, so later references can reuse it.
    namedMaterial = {}

    def __init__(self):
        """Initialize a neutral gray, untextured material."""
        self.emission = Color(0.0, 0.0, 0.0, 1.0)
        self.ambient = Color(0.0, 0.0, 0.0, 0.0)
        self.diffuse = Color(0.5, 0.5, 0.5, 1.0)
        self.specular = Color(0.0, 0.0, 0.0, 1.0)
        self.shininess = None
        self.index_of_refraction = 1.0
        self.texture = ""
        self.name = None
        self.defName = None

    def parseFromMaterialNode(self, node):
        """Parse a URDF <material> node: read the 'color' rgba values and
        register this material under its 'name' attribute.

        Raises AssertionError if a material with the same name was already
        registered (duplicate definitions are not expected).
        """
        if hasElement(node, 'color'):
            colorElement = node.getElementsByTagName('color')[0]
            colors = colorElement.getAttribute('rgba').split()
            # Bug fix: Color defines 'red'/'green'/'blue' (see the Color class and
            # the readers such as getColladaMesh); the previous '.r'/'.g'/'.b'
            # assignments created new attributes and left the real channels unset.
            self.diffuse.red = float(colors[0])
            self.diffuse.green = float(colors[1])
            self.diffuse.blue = float(colors[2])
            self.diffuse.alpha = float(colors[3])
        if node.hasAttribute('name'):
            self.name = node.getAttribute('name')
            if self.name not in Material.namedMaterial:
                Material.namedMaterial[self.name] = self
            else:
                assert False, 'duplicate material name: %s' % self.name
class Visual():
    """Visual element of a link: pose, geometry and material."""
    def __init__(self):
        """Identity pose with fresh geometry and material objects."""
        self.position = [0.0, 0.0, 0.0]
        self.rotation = [1.0, 0.0, 0.0, 0.0]
        self.geometry, self.material = Geometry(), Material()
class Collision():
    """Collision element of a link: pose and geometry only (no material)."""
    def __init__(self):
        """Identity pose with a fresh geometry object."""
        self.position = [0.0, 0.0, 0.0]
        self.rotation = [1.0, 0.0, 0.0, 0.0]
        self.geometry = Geometry()
class Calibration():
    """Joint calibration data."""
    def __init__(self):
        """Zero reference position, rising edge by default."""
        self.limit, self.rising = 0.0, True
class Dynamics():
    """Joint dynamics data."""
    def __init__(self):
        """No damping and no friction by default."""
        self.damping = self.friction = 0.0
class Limit():
    """Joint limit data."""
    def __init__(self):
        """Zero bounds; effort defaults high so an unspecified URDF limit
        behaves as 'no limit'."""
        self.lower = self.upper = 0.0
        self.effort = 10000  # if not specified in the URDF, there is no limit
        self.velocity = 0.0
class Safety():
    """Joint safety-controller data."""
    def __init__(self):
        """All safety bounds and gains start at zero."""
        self.lower = self.upper = 0.0
        self.kPosition = self.kVelocity = 0.0
class Link():
    """A robot link: name, inertia, visual and collision elements."""
    def __init__(self):
        """Empty link with a placeholder name and fresh inertia."""
        self.name = 'default'
        self.inertia = Inertia()
        self.visual, self.collision = [], []
        self.forceSensor = False
class Joint():
    """A robot joint connecting a parent link to a child link."""
    def __init__(self):
        """Identity pose, placeholder names and fresh sub-objects."""
        self.name = self.type = 'default'
        self.position = [0.0, 0.0, 0.0]
        self.rotation = [1.0, 0.0, 0.0, 0.0]
        self.parent = self.child = 'default'
        self.axis = []
        self.calibration = Calibration()
        self.dynamics = Dynamics()
        self.limit = Limit()
        self.safety = Safety()
class IMU():
    """Define an IMU sensor.

    Exported to Webots as four devices sharing the sensor name:
    an InertialUnit, an Accelerometer, a Gyro and a Compass.
    """
    # NOTE(review): presumably the registry of parsed IMU sensors; it is
    # populated outside this chunk — confirm against the parser entry point.
    list = []
    def __init__(self):
        """Initializatization."""
        self.name = 'imu'
        self.gaussianNoise = 0  # noise level; 0 suppresses the noise/lookupTable lines on export
        self.parentLink = None  # NOTE(review): presumably the link the sensor is attached to — confirm
    def export(self, file, indentationLevel):
        """Export this IMU."""
        indent = '  '
        # export InertialUnit
        file.write(indentationLevel * indent + 'InertialUnit {\n')
        file.write(indentationLevel * indent + '  name "%s inertial"\n' % self.name)
        if self.gaussianNoise > 0:
            # NOTE(review): noise is normalized by pi/2 — confirm intended scaling
            file.write(indentationLevel * indent + '  noise %lf\n' % (self.gaussianNoise / (math.pi/2)))
        file.write(indentationLevel * indent + '}\n')
        # export Accelerometer
        file.write(indentationLevel * indent + 'Accelerometer {\n')
        file.write(indentationLevel * indent + '  name "%s accelerometer"\n' % self.name)
        if self.gaussianNoise > 0:
            file.write(indentationLevel * indent + '  lookupTable [-100 -100 %lf, 100 100 %lf]\n' %
                       (-self.gaussianNoise / 100.0, self.gaussianNoise / 100.0))
        file.write(indentationLevel * indent + '}\n')
        # export Gyro
        file.write(indentationLevel * indent + 'Gyro {\n')
        file.write(indentationLevel * indent + '  name "%s gyro"\n' % self.name)
        if self.gaussianNoise > 0:
            file.write(indentationLevel * indent + '  lookupTable [-100 -100 %lf, 100 100 %lf]\n' %
                       (-self.gaussianNoise / 100.0, self.gaussianNoise / 100.0))
        file.write(indentationLevel * indent + '}\n')
        # export Compass
        file.write(indentationLevel * indent + 'Compass {\n')
        file.write(indentationLevel * indent + '  name "%s compass"\n' % self.name)
        if self.gaussianNoise > 0:
            file.write(indentationLevel * indent + '  lookupTable [-1 -1 %lf, 1 1 %lf]\n' %
                       (-self.gaussianNoise, self.gaussianNoise))
        file.write(indentationLevel * indent + '}\n')
class P3D():
    """Define P3D (ground truth pose).

    Exported to Webots as three devices sharing the sensor name:
    a GPS, an InertialUnit and a Gyro.
    """
    # NOTE(review): presumably the registry of parsed P3D plugins; it is
    # populated outside this chunk — confirm against the parser entry point.
    list = []
    def __init__(self):
        """Initializatization."""
        self.name = 'p3d'
        self.gaussianNoise = 0     # noise level; 0 suppresses the noise-related export lines
        self.noiseCorrelation = 0  # forwarded to the GPS 'noiseCorrelation' field when > 0
        self.speedNoise = 0        # forwarded to the GPS 'speedNoise' field when > 0
        self.parentLink = None     # NOTE(review): presumably the link the sensor is attached to — confirm
    def export(self, file, indentationLevel):
        """Export this P3D."""
        indent = '  '
        # export GPS
        file.write(indentationLevel * indent + 'GPS {\n')
        file.write(indentationLevel * indent + '  name "%s gps"\n' % self.name)
        if self.noiseCorrelation > 0:
            file.write(indentationLevel * indent + '  noiseCorrelation %lf\n' % self.noiseCorrelation)
        if self.speedNoise > 0:
            file.write(indentationLevel * indent + '  speedNoise %lf\n' % self.speedNoise)
        file.write(indentationLevel * indent + '}\n')
        # export InertialUnit
        file.write(indentationLevel * indent + 'InertialUnit {\n')
        file.write(indentationLevel * indent + '  name "%s inertial"\n' % self.name)
        if self.gaussianNoise > 0:
            # NOTE(review): noise is normalized by pi/2 — confirm intended scaling
            file.write(indentationLevel * indent + '  noise %lf\n' % (self.gaussianNoise / (math.pi/2)))
        file.write(indentationLevel * indent + '}\n')
        # export Gyro
        file.write(indentationLevel * indent + 'Gyro {\n')
        file.write(indentationLevel * indent + '  name "%s gyro"\n' % self.name)
        if self.gaussianNoise > 0:
            file.write(indentationLevel * indent + '  lookupTable [-100 -100 %lf, 100 100 %lf]\n' %
                       (-self.gaussianNoise / 100.0, self.gaussianNoise / 100.0))
        file.write(indentationLevel * indent + '}\n')
class Camera():
    """Define a camera sensor.

    Fields left at None are simply omitted from the exported Camera node,
    so Webots falls back to its own defaults.
    """
    # NOTE(review): presumably the registry of parsed cameras; it is
    # populated outside this chunk — confirm against the parser entry point.
    list = []
    def __init__(self):
        """Initializatization."""
        self.name = 'camera'
        self.fov = None     # written as 'fieldOfView' when set
        self.width = None   # image width in pixels, when set
        self.height = None  # image height in pixels, when set
        self.noise = None   # written as 'noise' when set
    def export(self, file, indentationLevel):
        """Export this camera."""
        indent = '  '
        file.write(indentationLevel * indent + 'Camera {\n')
        # rotation to convert from REP103 to webots viewport
        file.write(indentationLevel * indent + '  rotation 1.0 0.0 0.0 3.141591\n')
        file.write(indentationLevel * indent + '  name "%s"\n' % self.name)
        if self.fov:
            file.write(indentationLevel * indent + '  fieldOfView %lf\n' % self.fov)
        if self.width:
            file.write(indentationLevel * indent + '  width %d\n' % self.width)
        if self.height:
            file.write(indentationLevel * indent + '  height %d\n' % self.height)
        if self.noise:
            file.write(indentationLevel * indent + '  noise %lf\n' % self.noise)
        file.write(indentationLevel * indent + '}\n')
class Lidar():
    """Define a lidar sensor.

    Fields left at None are omitted from the exported Lidar node, so Webots
    falls back to its own defaults.
    """
    # NOTE(review): presumably the registry of parsed lidars; it is
    # populated outside this chunk — confirm against the parser entry point.
    list = []
    def __init__(self):
        """Initializatization."""
        self.name = 'lidar'
        self.fov = None                  # written as 'fieldOfView' when set
        self.verticalFieldOfView = None  # written when set
        self.horizontalResolution = None # written when set
        self.numberOfLayers = 1          # always written (truthy default)
        self.minRange = None             # written when set; also sets 'near' if below 0.01
        self.maxRange = None             # written when set
        self.resolution = None           # written when set
        self.noise = None                # written when set
    def export(self, file, indentationLevel):
        """Export this lidar."""
        indent = '  '
        file.write(indentationLevel * indent + 'Lidar {\n')
        file.write(indentationLevel * indent + '  name "%s"\n' % self.name)
        if self.fov:
            file.write(indentationLevel * indent + '  fieldOfView %lf\n' % self.fov)
        if self.verticalFieldOfView:
            file.write(indentationLevel * indent + '  verticalFieldOfView %lf\n' % self.verticalFieldOfView)
        if self.horizontalResolution:
            file.write(indentationLevel * indent + '  horizontalResolution %d\n' % self.horizontalResolution)
        if self.numberOfLayers:
            file.write(indentationLevel * indent + '  numberOfLayers %d\n' % self.numberOfLayers)
        if self.minRange:
            # keep the near clipping plane inside the measured range
            if self.minRange < 0.01:
                file.write(indentationLevel * indent + '  near %lf\n' % self.minRange)
            file.write(indentationLevel * indent + '  minRange %lf\n' % self.minRange)
        if self.maxRange:
            file.write(indentationLevel * indent + '  maxRange %lf\n' % self.maxRange)
        if self.noise:
            file.write(indentationLevel * indent + '  noise %lf\n' % self.noise)
        if self.resolution:
            file.write(indentationLevel * indent + '  resolution %lf\n' % self.resolution)
        file.write(indentationLevel * indent + '}\n')
def colorVector2Instance(cv, alpha_last=True):
    """Build a Color from a 4-component vector.

    When alpha_last is True the layout is (R, G, B, A);
    otherwise it is (A, R, G, B).
    """
    offset = 0 if alpha_last else 1  # where the RGB triple starts
    c = Color()
    c.red = cv[offset]
    c.green = cv[offset + 1]
    c.blue = cv[offset + 2]
    c.alpha = cv[3] if alpha_last else cv[0]
    return c
def getRobotName(node):
    """Return the 'name' attribute of the URDF root node (also printed to stdout)."""
    parsedName = node.getAttribute('name')
    print('Robot name: ' + parsedName)
    return parsedName
def hasElement(node, element):
    """Return True if `node` contains at least one descendant tag named `element`.

    Note: getElementsByTagName matches any descendant, not only direct children.
    """
    # Return the comparison directly instead of an if/else over True/False.
    return node.getElementsByTagName(element).length > 0
def getSTLMesh(filename, node):
    """Read a binary STL file into node.geometry.trimesh and return node.

    Binary STL layout: an 80-byte header, a 4-byte triangle count, then one
    50-byte record per triangle (normal + 3 vertices as 3 floats each, plus a
    2-byte attribute count). Unless disableMeshOptimization is set, vertices
    already seen are deduplicated and faces reference them by index.
    """
    print('Parsing Mesh: ' + filename)
    stlFile = open(filename, 'rb')
    stlFile.read(80)  # skip the 80-byte header
    numTriangles = struct.unpack("@i", stlFile.read(4))[0]  # triangle count
    # First triangle: discard the normal, keep the three vertices.
    struct.unpack("<3f", stlFile.read(12))
    a = struct.unpack("<3f", stlFile.read(12))
    b = struct.unpack("<3f", stlFile.read(12))
    c = struct.unpack("<3f", stlFile.read(12))
    struct.unpack("H", stlFile.read(2))  # discard the attribute byte count
    trimesh = node.geometry.trimesh
    trimesh.coord.append(a)
    trimesh.coord.append(b)
    trimesh.coord.append(c)
    if numTriangles > 50000 and not disableMeshOptimization:
        # NOTE(review): the 'not in trimesh.coord' dedup below is a linear scan
        # per vertex (quadratic overall), hence this warning for big meshes.
        print('Warning: This mesh has a lot of triangles!')
        print('Warning: It is recommended to use the script with the \'--disable-mesh-optimization\' argument.')
    for i in range(1, numTriangles):
        if i % 100 == 0:
            sys.stdout.write('%d / %d\r' % (i, numTriangles))  # progress indicator
        struct.unpack("<3f", stlFile.read(12))  # discard this facet's normal
        a = struct.unpack("<3f", stlFile.read(12))
        indexA = None
        if disableMeshOptimization or a not in trimesh.coord:
            indexA = len(trimesh.coord)
            trimesh.coord.append(a)
        b = struct.unpack("<3f", stlFile.read(12))
        indexB = None
        if disableMeshOptimization or b not in trimesh.coord:
            indexB = len(trimesh.coord)
            trimesh.coord.append(b)
        c = struct.unpack("<3f", stlFile.read(12))
        indexC = None
        if disableMeshOptimization or c not in trimesh.coord:
            indexC = len(trimesh.coord)
            trimesh.coord.append(c)
        struct.unpack("H", stlFile.read(2))  # discard the attribute byte count
        # A None index means the vertex was a duplicate: look up the original.
        trimesh.coordIndex.append([indexA if indexA is not None else trimesh.coord.index(a),
                                   indexB if indexB is not None else trimesh.coord.index(b),
                                   indexC if indexC is not None else trimesh.coord.index(c)])
    stlFile.close()
    return node
def getOBJMesh(filename, node, link):
    """Read a Wavefront OBJ file and append its objects to the link.

    Each 'o' section becomes one Visual (when `node` carries a material) or
    one Collision, appended to link.visual / link.collision respectively.
    OBJ indices are 1-based and global across objects, hence the
    `indexOffset` bookkeeping per section.
    """
    print('Parsing Mesh: ' + filename)
    # A material on the source node means this mesh is a visual element.
    if hasattr(node, 'material') and node.material:
        isVisual = True
    else:
        isVisual = False
    with open(filename, 'r') as file:
        counter = 0       # number of 'o' sections seen so far
        indexOffset = 0   # global vertex index offset of the current section
        collision = None  # current Visual/Collision being filled
        # NOTE(review): if the file declares 'v'/'f' lines before any 'o'
        # line, `collision` is still None here — confirm inputs always
        # start with an object declaration.
        for line in file:
            header, body = line.split(' ', 1)
            if header == '#':
                continue
            elif header == 'o':
                # Starting a new object: flush the previous one first.
                if counter != 0:
                    if isVisual:
                        link.visual.append(collision)
                    else:
                        link.collision.append(collision)
                    indexOffset += len(collision.geometry.trimesh.coord)
                if isVisual:
                    collision = Visual()
                    collision.material = node.material
                else:
                    collision = Collision()
                collision.position = node.position
                collision.rotation = node.rotation
                collision.geometry.scale = node.geometry.scale
                # Name sections after the file, suffixed '_<n>' from the 2nd on.
                extSring = '_%d' % (counter) if counter != 0 else ''
                collision.geometry.name = '%s%s' % (os.path.splitext(os.path.basename(filename))[0], extSring)
                counter += 1
            elif header == 'f':  # face
                vertices = body.split()
                coordIndex = []
                texCoordIndex = []
                normalIndex = []
                for vertex in vertices:
                    # Each face vertex is 'v', 'v/vt', 'v//vn' or 'v/vt/vn';
                    # convert the 1-based global indices to 0-based local ones.
                    indices = vertex.split('/')
                    coordIndex.append(int(indices[0]) - indexOffset - 1)  # vertex coordinates
                    length = len(indices)
                    if length > 1 and indices[1]:  # texture coordinates
                        texCoordIndex.append(int(indices[1]) - indexOffset - 1)
                    if length > 2 and indices[2]:  # normal coordinates
                        normalIndex.append(int(indices[2]) - indexOffset - 1)
                collision.geometry.trimesh.coordIndex.append(coordIndex)
                if texCoordIndex:
                    collision.geometry.trimesh.texCoordIndex.append(texCoordIndex)
                if normalIndex:
                    collision.geometry.trimesh.normalIndex.append(normalIndex)
            elif header == 'l':  # line, ignored
                continue
            elif header == 'v':  # vertex
                x, y, z = body.split()
                collision.geometry.trimesh.coord.append([float(x), float(y), float(z)])
            elif header == 'vn':  # normals
                x, y, z = body.split()
                collision.geometry.trimesh.normal.append([float(x), float(y), float(z)])
            elif header == 'vp':  # parameters, ignored
                continue
            elif header == 'vt':  # texture coordinate
                texCoord = body.split()
                if len(texCoord) < 1:  # v argument is optional and defaults to 0
                    texCoord.append('0')
                collision.geometry.trimesh.texCoord.append([float(texCoord[0]), float(texCoord[1])])
            continue
        # Flush the last (or only) object.
        if isVisual:
            link.visual.append(collision)
        else:
            link.collision.append(collision)
def getColladaMesh(filename, node, link):
    """Read a Collada (.dae) file and append its primitives to the link.

    When `node` carries a material, each primitive becomes a Visual (with
    geometry cached/shared through Geometry.reference and material data taken
    from the Collada effect); otherwise each becomes a bare Collision.
    Requires the optional 'pycollada' package.
    """
    if not colladaIsAvailable:
        # pycollada is an optional dependency: warn and skip instead of failing.
        sys.stderr.write('Collada module not found, please install it with:\n')
        sys.stderr.write('  python -m pip install pycollada\n')
        sys.stderr.write('Skipping "%s"\n' % filename)
        return
    print('Parsing Mesh: ' + filename)
    colladaMesh = Collada(filename)
    index = -1  # running primitive index, used to build unique geometry names
    if hasattr(node, 'material') and node.material:
        for geometry in list(colladaMesh.scene.objects('geometry')):
            for data in list(geometry.primitives()):
                visual = Visual()
                index += 1
                visual.position = node.position
                visual.rotation = node.rotation
                # Start from the material of the URDF node; Collada effect
                # data below may override parts of it.
                visual.material.diffuse.red = node.material.diffuse.red
                visual.material.diffuse.green = node.material.diffuse.green
                visual.material.diffuse.blue = node.material.diffuse.blue
                visual.material.diffuse.alpha = node.material.diffuse.alpha
                visual.material.texture = node.material.texture
                name = '%s_%d' % (os.path.splitext(os.path.basename(filename))[0], index)
                if type(data.original) is lineset.LineSet:
                    visual.geometry.lineset = True
                # Reuse an identical geometry parsed earlier, if any.
                if name in Geometry.reference:
                    visual.geometry = Geometry.reference[name]
                else:
                    Geometry.reference[name] = visual.geometry
                    visual.geometry.name = name
                    visual.geometry.scale = node.geometry.scale
                    for val in data.vertex:
                        visual.geometry.trimesh.coord.append(numpy.array(val))
                    for val in data.vertex_index:
                        visual.geometry.trimesh.coordIndex.append(val)
                    if data.texcoordset:  # non-empty
                        for val in data.texcoordset[0]:
                            visual.geometry.trimesh.texCoord.append(val)
                    if data.texcoord_indexset:  # non-empty
                        for val in data.texcoord_indexset[0]:
                            visual.geometry.trimesh.texCoordIndex.append(val)
                    if hasattr(data, '_normal') and data._normal is not None and data._normal.size > 0:
                        for val in data._normal:
                            visual.geometry.trimesh.normal.append(numpy.array(val))
                    if hasattr(data, '_normal_index') and data._normal_index is not None and data._normal_index.size > 0:
                        for val in data._normal_index:
                            visual.geometry.trimesh.normalIndex.append(val)
                if data.material and data.material.effect:
                    # Copy whatever the Collada effect defines; tuple checks
                    # guard against pycollada returning non-color values.
                    if data.material.effect.emission and isinstance(data.material.effect.emission, tuple):
                        visual.material.emission = colorVector2Instance(data.material.effect.emission)
                    if data.material.effect.ambient and isinstance(data.material.effect.ambient, tuple):
                        visual.material.ambient = colorVector2Instance(data.material.effect.ambient)
                    if data.material.effect.specular and isinstance(data.material.effect.specular, tuple):
                        visual.material.specular = colorVector2Instance(data.material.effect.specular)
                    if data.material.effect.shininess:
                        visual.material.shininess = data.material.effect.shininess
                    if data.material.effect.index_of_refraction:
                        visual.material.index_of_refraction = data.material.effect.index_of_refraction
                    if data.material.effect.diffuse:
                        if numpy.size(data.material.effect.diffuse) > 1\
                                and all([isinstance(x, numbers.Number) for x in data.material.effect.diffuse]):
                            # diffuse is defined by values
                            visual.material.diffuse = colorVector2Instance(data.material.effect.diffuse)
                        else:
                            # diffuse is defined by *.tif files
                            visual.material.texture = 'textures/' + \
                                data.material.effect.diffuse.sampler.surface.image.path.split('/')[-1]
                            txt = os.path.splitext(visual.material.texture)[1]
                            if txt == '.tiff' or txt == '.tif':
                                # Convert TIFF textures to PNG into the robot's
                                # texture folder (TIFF is not usable directly).
                                for dirname, dirnames, filenames in os.walk('.'):
                                    for file in filenames:
                                        if file == str(visual.material.texture.split('/')[-1]):
                                            try:
                                                tifImage = Image.open(os.path.join(dirname, file))
                                                img = './' + robotName + '_textures'
                                                tifImage.save(os.path.splitext(os.path.join(img, file))[0] + '.png')
                                                visual.material.texture = (robotName +
                                                                           '_textures/' + os.path.splitext(file)[0] + '.png')
                                                print('translated image ' + visual.material.texture)
                                            except IOError:
                                                visual.material.texture = ""
                                                print('failed to open ' + os.path.join(dirname, file))
                    else:
                        visual.material.diffuse = colorVector2Instance([1.0, 1.0, 1.0, 1.0])
                link.visual.append(visual)
    else:
        # No material: import the raw geometry as collision shapes only.
        for geometry in list(colladaMesh.scene.objects('geometry')):
            for data in list(geometry.primitives()):
                collision = Collision()
                collision.position = node.position
                collision.rotation = node.rotation
                collision.geometry.scale = node.geometry.scale
                for value in data.vertex:
                    collision.geometry.trimesh.coord.append(numpy.array(value))
                for value in data.vertex_index:
                    collision.geometry.trimesh.coordIndex.append(value)
                link.collision.append(collision)
def getPosition(node):
    """Read the 'origin' xyz translation of a physical or visual object.

    Returns:
        list: [x, y, z] floats parsed from the 'xyz' attribute of the
        node's first 'origin' descendant element.
    """
    originElement = node.getElementsByTagName('origin')[0]
    components = originElement.getAttribute('xyz').split()
    # Exactly three components are read; extra tokens are ignored.
    return [float(components[axis]) for axis in range(3)]
def getRotation(node, isCylinder=False):
    """Read the 'origin' rpy rotation of a physical or visual object.

    The roll/pitch/yaw triple (defaulting to zeros when no 'origin'
    element is present) is converted to the euler-axis representation.

    Args:
        node: DOM element to read the rotation from.
        isCylinder (bool): Forwarded to convertRPYtoEulerAxis.
    """
    rpy = [0.0, 0.0, 0.0]
    if hasElement(node, 'origin'):
        tokens = node.getElementsByTagName('origin')[0].getAttribute('rpy').split()
        for axis in range(3):
            rpy[axis] = float(tokens[axis])
    return convertRPYtoEulerAxis(rpy, isCylinder)
def getInertia(node):
    """Parse the 'inertial' element of a link into an Inertia object.

    Reads the optional origin (position/rotation), the mass and the six
    components of the symmetric inertia matrix.
    """
    inertia = Inertia()
    inertialElement = node.getElementsByTagName('inertial')[0]
    if hasElement(inertialElement, 'origin'):
        originElement = inertialElement.getElementsByTagName('origin')[0]
        if originElement.getAttribute('xyz'):
            inertia.position = getPosition(inertialElement)
        if originElement.getAttribute('rpy'):
            inertia.rotation = getRotation(inertialElement)
    if hasElement(inertialElement, 'mass'):
        inertia.mass = float(inertialElement.getElementsByTagName('mass')[0].getAttribute('value'))
    if hasElement(inertialElement, 'inertia'):
        matrixNode = inertialElement.getElementsByTagName('inertia')[0]
        # The six independent components of the symmetric inertia matrix.
        for component in ('ixx', 'ixy', 'ixz', 'iyy', 'iyz', 'izz'):
            setattr(inertia, component, float(matrixNode.getAttribute(component)))
    return inertia
def getVisual(link, node, path):
    """Parse the 'visual' elements of a link and append them to `link.visual`.

    Args:
        link: Link object being populated.
        node: DOM element of the link.
        path: Directory used to resolve relative mesh filenames.
    """
    for index in range(0, len(node.getElementsByTagName('visual'))):
        visual = Visual()
        visualElement = node.getElementsByTagName('visual')[index]
        if hasElement(visualElement, 'origin'):
            if visualElement.getElementsByTagName('origin')[0].getAttribute('xyz'):
                visual.position = getPosition(visualElement)
            if visualElement.getElementsByTagName('origin')[0].getAttribute('rpy'):
                # Cylinders get a special rotation conversion (isCylinder=True).
                if hasElement(visualElement.getElementsByTagName('geometry')[0], 'cylinder'):
                    visual.rotation = getRotation(visualElement, True)
                else:
                    visual.rotation = getRotation(visualElement)
            elif hasElement(visualElement.getElementsByTagName('geometry')[0], 'cylinder'):
                # No rpy attribute: cylinders still get the conversion rotation.
                visual.rotation = getRotation(visualElement, True)
        geometryElement = visualElement.getElementsByTagName('geometry')[0]
        if hasElement(visualElement, 'material'):
            material = visualElement.getElementsByTagName('material')[0]
            if material.hasAttribute('name') and material.getAttribute('name') in Material.namedMaterial:
                # Re-use a material that was already registered under this name.
                visual.material = Material.namedMaterial[material.getAttribute('name')]
            elif hasElement(material, 'color'):
                # Material defined inline by an rgba color.
                colorElement = material.getElementsByTagName('color')[0].getAttribute('rgba').split()
                visual.material.diffuse.red = float(colorElement[0])
                visual.material.diffuse.green = float(colorElement[1])
                visual.material.diffuse.blue = float(colorElement[2])
                visual.material.diffuse.alpha = float(colorElement[3])
                if material.hasAttribute('name'):
                    if material.getAttribute('name'):
                        visual.material.name = material.getAttribute('name')
                    else:
                        # Empty name attribute: derive a name from the link.
                        visual.material.name = node.getAttribute('name') + '_material'
                    # Register the material so later links can refer to it by name.
                    Material.namedMaterial[visual.material.name] = visual.material
            elif material.firstChild and material.firstChild.nodeValue in materials:
                # Material referenced by name from the global 'materials' table.
                materialName = material.firstChild.nodeValue
                visual.material.diffuse.red = float(materials[materialName]['diffuse'][0])
                visual.material.diffuse.green = float(materials[materialName]['diffuse'][1])
                visual.material.diffuse.blue = float(materials[materialName]['diffuse'][2])
                visual.material.diffuse.alpha = float(materials[materialName]['diffuse'][3])
                visual.material.ambient.red = float(materials[materialName]['ambient'][0])
                visual.material.ambient.green = float(materials[materialName]['ambient'][1])
                visual.material.ambient.blue = float(materials[materialName]['ambient'][2])
                visual.material.ambient.alpha = float(materials[materialName]['ambient'][3])
                visual.material.specular.red = float(materials[materialName]['specular'][0])
                visual.material.specular.green = float(materials[materialName]['specular'][1])
                visual.material.specular.blue = float(materials[materialName]['specular'][2])
                visual.material.specular.alpha = float(materials[materialName]['specular'][3])
                visual.material.name = materialName
                Material.namedMaterial[materialName] = visual.material
            if hasElement(material, 'texture'):
                visual.material.texture = material.getElementsByTagName('texture')[0].getAttribute('filename')
                # TIFF textures are converted to PNG next to the robot files.
                if os.path.splitext(visual.material.texture)[1] == '.tiff' \
                        or os.path.splitext(visual.material.texture)[1] == '.tif':
                    for dirname, dirnames, filenames in os.walk('.'):
                        for filename in filenames:
                            if filename == str(visual.material.texture.split('/')[-1]):
                                print('try to translate image ' + filename)
                                try:
                                    tifImage = Image.open(os.path.join(dirname, filename))
                                    tifImage.save(os.path.splitext(os.path.join('./' + robotName + '_' + 'textures',
                                                                                filename))[0] + '.png')
                                    visual.material.texture = (robotName + '_' + 'textures/' +
                                                               os.path.splitext(filename)[0] + '.png')
                                except IOError:
                                    # Conversion failed: drop the texture reference.
                                    visual.material.texture = ""
                                    print('failed to open ' + os.path.join(dirname, filename))
        if hasElement(geometryElement, 'box'):
            visual.geometry.box.x = float(geometryElement.getElementsByTagName('box')[0].getAttribute('size').split()[0])
            visual.geometry.box.y = float(geometryElement.getElementsByTagName('box')[0].getAttribute('size').split()[1])
            visual.geometry.box.z = float(geometryElement.getElementsByTagName('box')[0].getAttribute('size').split()[2])
            link.visual.append(visual)
        elif hasElement(geometryElement, 'cylinder'):
            visual.geometry.cylinder.radius = float(geometryElement.getElementsByTagName('cylinder')[0].getAttribute('radius'))
            visual.geometry.cylinder.length = float(geometryElement.getElementsByTagName('cylinder')[0].getAttribute('length'))
            link.visual.append(visual)
        elif hasElement(geometryElement, 'sphere'):
            visual.geometry.sphere.radius = float(geometryElement.getElementsByTagName('sphere')[0].getAttribute('radius'))
            link.visual.append(visual)
        elif hasElement(geometryElement, 'mesh'):
            meshfile = geometryElement.getElementsByTagName('mesh')[0].getAttribute('filename')
            if not os.path.isabs(meshfile):
                # Resolve relative mesh paths against the URDF directory.
                meshfile = os.path.normpath(os.path.join(path, meshfile))
            # hack for gazebo mesh database
            if meshfile.count('package'):
                idx0 = meshfile.find('package://')
                meshfile = meshfile[idx0 + len('package://'):]
            if geometryElement.getElementsByTagName('mesh')[0].getAttribute('scale'):
                meshScale = geometryElement.getElementsByTagName('mesh')[0].getAttribute('scale').split()
                visual.geometry.scale[0] = float(meshScale[0])
                visual.geometry.scale[1] = float(meshScale[1])
                visual.geometry.scale[2] = float(meshScale[2])
            extension = os.path.splitext(meshfile)[1].lower()
            if extension == '.dae':
                # Collada/OBJ helpers append the visuals to the link themselves.
                getColladaMesh(meshfile, visual, link)
            elif extension == '.obj':
                getOBJMesh(meshfile, visual, link)
            elif extension == '.stl':
                name = os.path.splitext(os.path.basename(meshfile))[0]
                if name in Geometry.reference:
                    # Re-use an already parsed geometry with the same base name.
                    visual.geometry = Geometry.reference[name]
                else:
                    if extension == '.stl':  # NOTE(review): always true here (redundant guard)
                        visual = getSTLMesh(meshfile, visual)
                    visual.geometry.name = name
                    Geometry.reference[name] = visual.geometry
                link.visual.append(visual)
            else:
                print('Unsupported mesh format: \"' + extension + '\"')
def getCollision(link, node, path):
    """Parse the 'collision' elements of a link and append them to `link.collision`.

    Args:
        link: Link object being populated.
        node: DOM element of the link.
        path: Directory used to resolve relative mesh filenames.
    """
    for index in range(0, len(node.getElementsByTagName('collision'))):
        collision = Collision()
        collisionElement = node.getElementsByTagName('collision')[index]
        if hasElement(collisionElement, 'origin'):
            if collisionElement.getElementsByTagName('origin')[0].getAttribute('xyz'):
                collision.position = getPosition(collisionElement)
            if collisionElement.getElementsByTagName('origin')[0].getAttribute('rpy'):
                # Cylinders get a special rotation conversion (isCylinder=True).
                if hasElement(collisionElement.getElementsByTagName('geometry')[0], 'cylinder'):
                    collision.rotation = getRotation(collisionElement, True)
                else:
                    collision.rotation = getRotation(collisionElement)
            elif hasElement(collisionElement.getElementsByTagName('geometry')[0], 'cylinder'):
                # No rpy attribute: cylinders still get the conversion rotation.
                collision.rotation = getRotation(collisionElement, True)
        geometryElement = collisionElement.getElementsByTagName('geometry')[0]
        if hasElement(geometryElement, 'box'):
            size = geometryElement.getElementsByTagName('box')[0].getAttribute('size').split()
            collision.geometry.box.x = float(size[0])
            collision.geometry.box.y = float(size[1])
            collision.geometry.box.z = float(size[2])
            link.collision.append(collision)
        elif hasElement(geometryElement, 'cylinder'):
            element = geometryElement.getElementsByTagName('cylinder')[0]
            collision.geometry.cylinder.radius = float(element.getAttribute('radius'))
            collision.geometry.cylinder.length = float(element.getAttribute('length'))
            link.collision.append(collision)
        elif hasElement(geometryElement, 'sphere'):
            collision.geometry.sphere.radius = float(geometryElement.getElementsByTagName('sphere')[0].getAttribute('radius'))
            link.collision.append(collision)
        elif hasElement(geometryElement, 'mesh'):
            # NOTE(review): unlike getVisual(), no os.path.isabs check here —
            # absolute mesh filenames would be mangled; confirm intended.
            meshfile = os.path.normpath(os.path.join(path,
                                        geometryElement.getElementsByTagName('mesh')[0].getAttribute('filename')))
            if geometryElement.getElementsByTagName('mesh')[0].getAttribute('scale'):
                meshScale = geometryElement.getElementsByTagName('mesh')[0].getAttribute('scale').split()
                collision.geometry.scale[0] = float(meshScale[0])
                collision.geometry.scale[1] = float(meshScale[1])
                collision.geometry.scale[2] = float(meshScale[2])
            # hack for gazebo mesh database
            if meshfile.count('package'):
                idx0 = meshfile.find('package://')
                meshfile = meshfile[idx0 + len('package://'):]
            extension = os.path.splitext(meshfile)[1].lower()
            if extension == '.dae':
                # Collada/OBJ helpers append the collisions to the link themselves.
                getColladaMesh(meshfile, collision, link)
            elif extension == '.obj':
                getOBJMesh(meshfile, collision, link)
            elif extension == '.stl':
                name = os.path.splitext(os.path.basename(meshfile))[0]
                if name in Geometry.reference:
                    # Re-use an already parsed geometry with the same base name.
                    collision.geometry = Geometry.reference[name]
                else:
                    if extension == '.stl':  # NOTE(review): always true here (redundant guard)
                        # NOTE(review): getVisual() assigns the helper's return value
                        # (visual = getSTLMesh(...)); here it is stored into
                        # geometry.stl instead — confirm which usage is intended.
                        collision.geometry.stl = getSTLMesh(meshfile, collision)
                    collision.geometry.name = name
                    Geometry.reference[name] = collision.geometry
                link.collision.append(collision)
            else:
                print('Unsupported mesh format for collision: \"' + extension + '\"')
def getAxis(node):
    """Read the rotation axis of a joint.

    Returns:
        list: [x, y, z] floats parsed from the 'xyz' attribute of the
        joint's first 'axis' descendant element.
    """
    tokens = node.getElementsByTagName('axis')[0].getAttribute('xyz').split()
    return [float(tokens[component]) for component in range(3)]
def getCalibration(node):
    """Get the URDF calibration tag.

    The reference position is taken from the 'rising' attribute when a
    'rising' element is present, from the 'falling' attribute otherwise.
    Note that the limit is kept as the raw attribute string.
    """
    calibration = Calibration()
    calibrationElement = node.getElementsByTagName('calibration')[0]
    rising = hasElement(calibrationElement, 'rising')
    attributeName = 'rising' if rising else 'falling'
    calibration.limit = calibrationElement.getAttribute(attributeName)
    calibration.rising = bool(rising)
    return calibration
def getDynamics(node):
    """Parse the optional 'damping' and 'friction' dynamics parameters of a joint."""
    dynamics = Dynamics()
    dynamicsElement = node.getElementsByTagName('dynamics')[0]
    for attribute in ('damping', 'friction'):
        value = dynamicsElement.getAttribute(attribute)
        if value:
            setattr(dynamics, attribute, float(value))
    return dynamics
def getLimit(node):
    """Get limits of a joint.

    'lower' and 'upper' are optional, 'effort' is stored only when it is
    non-zero and 'velocity' is always read.
    """
    limit = Limit()
    limitElement = node.getElementsByTagName('limit')[0]
    for bound in ('lower', 'upper'):
        value = limitElement.getAttribute(bound)
        if value:
            setattr(limit, bound, float(value))
    effort = float(limitElement.getAttribute('effort'))
    if effort != 0:
        limit.effort = effort
    limit.velocity = float(limitElement.getAttribute('velocity'))
    return limit
def getSafety(node):
    """Get the safety controller parameters of a joint.

    Reads the optional 'soft_lower_limit', 'soft_upper_limit' and
    'k_position' attributes and the mandatory 'k_velocity' attribute of
    the 'safety_controller' element.
    """
    safety = Safety()
    # Hoisted: the original re-ran getElementsByTagName('safety_controller')[0]
    # for every single attribute access.
    element = node.getElementsByTagName('safety_controller')[0]
    if element.getAttribute('soft_lower_limit'):
        safety.lower = float(element.getAttribute('soft_lower_limit'))
    if element.getAttribute('soft_upper_limit'):
        safety.upper = float(element.getAttribute('soft_upper_limit'))
    if element.getAttribute('k_position'):
        safety.kPosition = float(element.getAttribute('k_position'))
    safety.kVelocity = float(element.getAttribute('k_velocity'))
    return safety
def getLink(node, path):
    """Parse a 'link' element.

    Args:
        node: DOM element of the link.
        path (str): Directory used to resolve relative mesh filenames.

    Returns:
        Link: the parsed link; `inertia.mass` is set to None when the link
        carries no inertial, visual or collision data (dummy link).
    """
    link = Link()
    link.name = node.getAttribute('name')
    # Hoisted: the original evaluated each hasElement() test twice.
    hasInertial = hasElement(node, 'inertial')
    hasVisual = hasElement(node, 'visual')
    hasCollision = hasElement(node, 'collision')
    if hasInertial:
        link.inertia = getInertia(node)
    if hasVisual:
        getVisual(link, node, path)
    if hasCollision:
        getCollision(link, node, path)
    if not (hasInertial or hasVisual or hasCollision):
        link.inertia.mass = None
    return link
def getJoint(node):
    """Parse a 'joint' element into a Joint object.

    The mandatory parent/child links are always read; axis, calibration,
    dynamics, limit and safety controller are parsed only when present.
    """
    joint = Joint()
    joint.name = node.getAttribute('name')
    joint.type = node.getAttribute('type')
    if hasElement(node, 'origin'):
        originElement = node.getElementsByTagName('origin')[0]
        if originElement.getAttribute('xyz'):
            joint.position = getPosition(node)
        if originElement.getAttribute('rpy'):
            joint.rotation = getRotation(node)
    joint.parent = node.getElementsByTagName('parent')[0].getAttribute('link')
    joint.child = node.getElementsByTagName('child')[0].getAttribute('link')
    # Optional sub-elements, each with its dedicated parser.
    optionalParsers = (
        ('axis', 'axis', getAxis),
        ('calibration', 'calibration', getCalibration),
        ('dynamics', 'dynamics', getDynamics),
        ('limit', 'limit', getLimit),
        ('safety_controller', 'safety', getSafety),
    )
    for tagName, attributeName, parser in optionalParsers:
        if hasElement(node, tagName):
            setattr(joint, attributeName, parser(node))
    return joint
def isRootLink(link, childList):
    """Return True if `link` is a root link, i.e. it is nobody's child.

    Args:
        link (str): Name of the link to test.
        childList (list): Names of all links that are the child of some joint.
    """
    # Membership test replaces the original manual comparison loop.
    return link not in childList
def parseGazeboElement(element, parentLink, linkList):
    """Parse a Gazebo extension element into the sensor/plugin registries.

    Recognized plugins: libgazebo_ros_imu (-> IMU.list), libgazebo_ros_f3d
    (marks the referenced link as a force sensor) and libgazebo_ros_p3d
    (-> P3D.list). Recognized sensors: 'camera' (-> Camera.list) and
    'ray'/'gpu_ray' (-> Lidar.list).

    Args:
        element: DOM element of the Gazebo tag.
        parentLink (str): Link name the element applies to by default.
        linkList (list): All parsed Link objects, used to resolve references.
    """
    # A 'reference' attribute overrides the parent link when it names a known link.
    if element.hasAttribute("reference") and any([link.name == element.getAttribute('reference') for link in linkList]):
        parentLink = element.getAttribute("reference")
    for plugin in element.getElementsByTagName('plugin'):
        if plugin.hasAttribute('filename') and plugin.getAttribute('filename').startswith('libgazebo_ros_imu'):
            imu = IMU()
            imu.parentLink = parentLink
            if hasElement(plugin, 'topicName'):
                imu.name = plugin.getElementsByTagName('topicName')[0].firstChild.nodeValue
            if hasElement(plugin, 'gaussianNoise'):
                imu.gaussianNoise = float(plugin.getElementsByTagName('gaussianNoise')[0].firstChild.nodeValue)
            IMU.list.append(imu)
        elif plugin.hasAttribute('filename') and plugin.getAttribute('filename').startswith('libgazebo_ros_f3d'):
            if hasElement(plugin, "bodyName"):
                name = plugin.getElementsByTagName('bodyName')[0].firstChild.nodeValue
                # Mark the referenced link as a force sensor.
                for link in linkList:
                    if link.name == name:
                        link.forceSensor = True
                        break
        elif plugin.hasAttribute('filename') and plugin.getAttribute('filename').startswith('libgazebo_ros_p3d'):
            p3d = P3D()
            p3d.parentLink = parentLink
            if hasElement(plugin, 'topicName'):
                p3d.name = plugin.getElementsByTagName('topicName')[0].firstChild.nodeValue
            if hasElement(plugin, "xyzOffsets"):
                print('\033[1;33mWarning: URDF parser cannot handle \"xyzOffsets\" from p3d!\033[0m')
            if hasElement(plugin, "rpyOffsets"):
                print('\033[1;33mWarning: URDF parser cannot handle \"rpyOffsets\" from p3d!\033[0m')
            P3D.list.append(p3d)
    for sensorElement in element.getElementsByTagName('sensor'):
        if sensorElement.getAttribute('type') == 'camera':
            camera = Camera()
            camera.parentLink = parentLink
            camera.name = sensorElement.getAttribute('name')
            if hasElement(sensorElement, 'camera'):
                cameraElement = sensorElement.getElementsByTagName('camera')[0]
                if hasElement(cameraElement, 'horizontal_fov'):
                    camera.fov = float(cameraElement.getElementsByTagName('horizontal_fov')[0].firstChild.nodeValue)
                if hasElement(cameraElement, 'image'):
                    imageElement = cameraElement.getElementsByTagName('image')[0]
                    if hasElement(imageElement, 'width'):
                        camera.width = int(imageElement.getElementsByTagName('width')[0].firstChild.nodeValue)
                    if hasElement(imageElement, 'height'):
                        camera.height = int(imageElement.getElementsByTagName('height')[0].firstChild.nodeValue)
                    # Only the R8G8B8A8 pixel format is supported; warn otherwise.
                    if hasElement(imageElement, 'format') \
                            and imageElement.getElementsByTagName('format')[0].firstChild.nodeValue != 'R8G8B8A8':
                        print('Unsupported "%s" image format, using "R8G8B8A8" instead.' %
                              str(imageElement.getElementsByTagName('format')[0].firstChild.nodeValue))
            if hasElement(sensorElement, 'noise'):
                noiseElement = sensorElement.getElementsByTagName('noise')[0]
                if hasElement(noiseElement, 'stddev'):
                    camera.noise = float(noiseElement.getElementsByTagName('stddev')[0].firstChild.nodeValue)
            Camera.list.append(camera)
        elif sensorElement.getAttribute('type') == 'ray' or sensorElement.getAttribute('type') == 'gpu_ray':
            lidar = Lidar()
            lidar.parentLink = parentLink
            lidar.name = sensorElement.getAttribute('name')
            if hasElement(sensorElement, 'ray'):
                rayElement = sensorElement.getElementsByTagName('ray')[0]
                if hasElement(rayElement, 'scan'):
                    scanElement = rayElement.getElementsByTagName('scan')[0]
                    if hasElement(scanElement, 'horizontal'):
                        horizontalElement = scanElement.getElementsByTagName('horizontal')[0]
                        if hasElement(horizontalElement, 'samples'):
                            lidar.horizontalResolution = \
                                int(float(horizontalElement.getElementsByTagName('samples')[0].firstChild.nodeValue))
                        if hasElement(horizontalElement, 'min_angle') and hasElement(horizontalElement, 'max_angle'):
                            minAngle = float(horizontalElement.getElementsByTagName('min_angle')[0].firstChild.nodeValue)
                            maxAngle = float(horizontalElement.getElementsByTagName('max_angle')[0].firstChild.nodeValue)
                            # The horizontal field of view is the swept angle.
                            lidar.fov = maxAngle - minAngle
                    if hasElement(scanElement, 'vertical'):
                        verticalElement = scanElement.getElementsByTagName('vertical')[0]
                        if hasElement(verticalElement, 'samples'):
                            lidar.numberOfLayers = \
                                int(verticalElement.getElementsByTagName('samples')[0].firstChild.nodeValue)
                        if hasElement(verticalElement, 'min_angle') and hasElement(verticalElement, 'max_angle'):
                            minAngle = float(verticalElement.getElementsByTagName('min_angle')[0].firstChild.nodeValue)
                            maxAngle = float(verticalElement.getElementsByTagName('max_angle')[0].firstChild.nodeValue)
                            lidar.verticalFieldOfView = maxAngle - minAngle
                if hasElement(rayElement, 'range'):
                    rangeElement = rayElement.getElementsByTagName('range')[0]
                    if hasElement(rangeElement, 'min'):
                        lidar.minRange = float(rangeElement.getElementsByTagName('min')[0].firstChild.nodeValue)
                    if hasElement(rangeElement, 'max'):
                        lidar.maxRange = float(rangeElement.getElementsByTagName('max')[0].firstChild.nodeValue)
                    if hasElement(rangeElement, 'resolution'):
                        lidar.resolution = float(rangeElement.getElementsByTagName('resolution')[0].firstChild.nodeValue)
            if hasElement(sensorElement, 'noise'):
                noiseElement = sensorElement.getElementsByTagName('noise')[0]
                if hasElement(noiseElement, 'stddev'):
                    lidar.noise = float(noiseElement.getElementsByTagName('stddev')[0].firstChild.nodeValue)
                    if lidar.maxRange:
                        # Noise is stored relative to the maximum range.
                        lidar.noise /= lidar.maxRange
            Lidar.list.append(lidar)
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
class _1broker (Exchange):
    """ccxt adapter for the 1Broker exchange.

    NOTE(review): per the file header this code is generated; any fix
    should be applied in the ccxt sources/templates, not hand-edited here.
    """

    def describe(self):
        """Return the static exchange description merged over the base class."""
        return self.deep_extend(super(_1broker, self).describe(), {
            'id': '_1broker',
            'name': '1Broker',
            'countries': 'US',
            'rateLimit': 1500,
            'version': 'v2',
            'has': {
                'publicAPI': False,
                'CORS': True,
                'fetchTrades': False,
                'fetchOHLCV': True,
            },
            'timeframes': {
                '1m': '60',
                '15m': '900',
                '1h': '3600',
                '1d': '86400',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766021-420bd9fc-5ecb-11e7-8ed6-56d0081efed2.jpg',
                'api': 'https://1broker.com/api',
                'www': 'https://1broker.com',
                'doc': 'https://1broker.com/?c=en/content/api-documentation',
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': False,
            },
            'api': {
                # NOTE(review): only GET endpoints are declared below.
                'private': {
                    'get': [
                        'market/bars',
                        'market/categories',
                        'market/details',
                        'market/list',
                        'market/quotes',
                        'market/ticks',
                        'order/cancel',
                        'order/create',
                        'order/open',
                        'position/close',
                        'position/close_cancel',
                        'position/edit',
                        'position/history',
                        'position/open',
                        'position/shared/get',
                        'social/profile_statistics',
                        'social/profile_trades',
                        'user/bitcoin_deposit_address',
                        'user/details',
                        'user/overview',
                        'user/quota_status',
                        'user/transaction_log',
                    ],
                },
            },
        })

    def fetch_categories(self):
        """Return the list of market categories, dropping empty names."""
        response = self.privateGetMarketCategories()
        # they return an empty string among their categories, wtf?
        categories = response['response']
        result = []
        for i in range(0, len(categories)):
            if categories[i]:
                result.append(categories[i])
        return result

    def fetch_markets(self):
        """Fetch all markets, one privateGetMarketList request per category."""
        self_ = self  # workaround for Babel bug(not passing `self` to _recursive() call)
        categories = self.fetch_categories()
        result = []
        for c in range(0, len(categories)):
            category = categories[c]
            markets = self_.privateGetMarketList({
                'category': category.lower(),
            })
            for p in range(0, len(markets['response'])):
                market = markets['response'][p]
                id = market['symbol']
                symbol = None
                base = None
                quote = None
                if (category == 'FOREX') or (category == 'CRYPTO'):
                    # FOREX/CRYPTO market names already look like 'BASE/QUOTE'.
                    symbol = market['name']
                    parts = symbol.split('/')
                    base = parts[0]
                    quote = parts[1]
                else:
                    # Other categories (stocks, ...) are quoted in USD.
                    base = id
                    quote = 'USD'
                    symbol = base + '/' + quote
                base = self_.common_currency_code(base)
                quote = self_.common_currency_code(quote)
                result.append({
                    'id': id,
                    'symbol': symbol,
                    'base': base,
                    'quote': quote,
                    'info': market,
                })
        return result

    def fetch_balance(self, params={}):
        """Fetch the account balance; the API only reports a single BTC total."""
        self.load_markets()
        balance = self.privateGetUserOverview()
        response = balance['response']
        result = {
            'info': response,
        }
        currencies = list(self.currencies.keys())
        for c in range(0, len(currencies)):
            currency = currencies[c]
            result[currency] = self.account()
        # Only the BTC account is filled from the single reported balance.
        total = float(response['balance'])
        result['BTC']['free'] = total
        result['BTC']['total'] = total
        return self.parse_balance(result)

    def fetch_order_book(self, symbol, limit=None, params={}):
        """Build a one-level order book from the market quote (bid/ask only)."""
        self.load_markets()
        response = self.privateGetMarketQuotes(self.extend({
            'symbols': self.market_id(symbol),
        }, params))
        orderbook = response['response'][0]
        timestamp = self.parse8601(orderbook['updated'])
        bidPrice = float(orderbook['bid'])
        askPrice = float(orderbook['ask'])
        # Amounts are unknown: the API only provides prices.
        bid = [bidPrice, None]
        ask = [askPrice, None]
        return {
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'bids': [bid],
            'asks': [ask],
        }

    def fetch_trades(self, symbol):
        """Not available on this exchange."""
        raise NotSupported(self.id + ' fetchTrades() method not implemented yet')

    def fetch_ticker(self, symbol, params={}):
        """Synthesize a ticker from the latest 1-minute OHLC bar."""
        self.load_markets()
        result = self.privateGetMarketBars(self.extend({
            'symbol': self.market_id(symbol),
            'resolution': 60,
            'limit': 1,
        }, params))
        ticker = result['response'][0]
        timestamp = self.parse8601(ticker['date'])
        open = float(ticker['o'])
        close = float(ticker['c'])
        change = close - open
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['h']),
            'low': float(ticker['l']),
            'bid': None,
            'bidVolume': None,
            'ask': None,
            'askVolume': None,
            'vwap': None,
            'open': open,
            'close': close,
            'last': close,
            'previousClose': None,
            'change': change,
            'percentage': change / open * 100,
            'average': None,
            'baseVolume': None,
            'quoteVolume': None,
            'info': ticker,
        }

    def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
        """Convert an API bar into [timestamp, open, high, low, close, volume=None]."""
        return [
            self.parse8601(ohlcv['date']),
            float(ohlcv['o']),
            float(ohlcv['h']),
            float(ohlcv['l']),
            float(ohlcv['c']),
            None,
        ]

    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV bars for a symbol and timeframe."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
            'resolution': self.timeframes[timeframe],
        }
        if since is not None:
            request['date_start'] = self.iso8601(since)  # they also support date_end
        if limit is not None:
            request['limit'] = limit
        result = self.privateGetMarketBars(self.extend(request, params))
        return self.parse_ohlcvs(result['response'], market, timeframe, since, limit)

    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Create an order; market orders get the '_market' type suffix."""
        self.load_markets()
        order = {
            'symbol': self.market_id(symbol),
            'margin': amount,
            'direction': 'short' if (side == 'sell') else 'long',
            'leverage': 1,
            'type': side,
        }
        if type == 'limit':
            order['price'] = price
        else:
            order['type'] += '_market'
        result = self.privateGetOrderCreate(self.extend(order, params))
        return {
            'info': result,
            'id': result['response']['order_id'],
        }

    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by id."""
        self.load_markets()
        # NOTE(review): the 'api' map in describe() declares only GET endpoints
        # (including 'order/cancel'), so privatePostOrderCancel looks wrong —
        # presumably privateGetOrderCancel; confirm against the ccxt generator.
        return self.privatePostOrderCancel({'order_id': id})

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request URL; the apiKey is passed as a 'token' query parameter."""
        self.check_required_credentials()
        url = self.urls['api'] + '/' + self.version + '/' + path + '.php'
        query = self.extend({'token': self.apiKey}, params)
        url += '?' + self.urlencode(query)
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Perform the request and raise ExchangeError on warnings/errors."""
        response = self.fetch2(path, api, method, params, headers, body)
        if 'warning' in response:
            if response['warning']:
                raise ExchangeError(self.id + ' ' + self.json(response))
        if 'error' in response:
            if response['error']:
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
class SmartHomeProtocol(Enum):
    """
    Version of the Smart Home API. Default and recommended value is '3'. You may create a skill with version '2' for testing migration to version '3', but a skill submission using version '2' will not be certified.

    Allowed enum values: [_2, _2_0, _3, _3_0]
    """
    _2 = "2"
    _2_0 = "2.0"
    _3 = "3"
    _3_0 = "3.0"

    def to_dict(self):
        # type: () -> Dict[str, Any]
        """Return a single-entry dict mapping the member name to its value."""
        return {self.name: self.value}

    def to_str(self):
        # type: () -> str
        """Return the pretty-printed string form of the member value."""
        return pprint.pformat(self.value)

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        # type: (Any) -> bool
        """Members compare equal when their instance dictionaries match."""
        return isinstance(other, SmartHomeProtocol) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (Any) -> bool
        """Inverse of __eq__."""
        return not self == other
|
#!/usr/bin/env python3
"""DCMTools for loading (compressed) DICOM studies and series.
This module provides various methods to load compressed archives or
a single directory, which can contain (multiple) DICOM studies / series.
"""
from __future__ import print_function
import tarfile
import os
import time
try:
# Python 2 compatibility
from StringIO import StringIO as IOBuffer
except ImportError:
from io import BytesIO as IOBuffer
import pydicom
import numpy as np
import scipy.ndimage.interpolation
def peek_study(path):
    """Peek the `StudyInstanceUID` of a DICOM directory, that is,
    read files in the directory until one parses as DICOM and return
    the associated ID.

    Args:
        path (str): The directory which contains at least one DICOM file.

    Returns:
        str or None: The StudyInstanceUID if possible, None otherwise.
    """
    for entry in os.listdir(path):
        try:
            # os.path.join instead of the original manual '/' concatenation.
            dcm = pydicom.read_file(os.path.join(path, entry))
        except Exception:  # narrowed from a bare `except:`; non-DICOM files are expected
            continue
        return dcm.StudyInstanceUID
    return None
def peek_compressed_study(archive_path):
    """Peek the `StudyInstanceUID` of a compressed DICOM archive, that is,
    read archive members until one parses as DICOM and return the
    associated ID.

    Args:
        archive_path (str): The filepath of the archive.

    Returns:
        str or None: The StudyInstanceUID if possible, None otherwise.
    """
    lower = archive_path.lower()
    if lower.endswith('.gz') or lower.endswith('.tgz'):
        mode = 'r:gz'
    elif lower.endswith('.bz2') or lower.endswith('.tbz'):
        mode = 'r:bz2'
    else:
        # Unknown extension: keep the original behavior — tarfile rejects
        # the empty mode string and we return None.
        mode = ''
    try:
        tar_archive = tarfile.open(archive_path, mode)
    except Exception:  # narrowed from a bare `except:`
        return None
    dcm = None
    try:
        while True:
            tarinfo = tar_archive.next()
            if tarinfo is None:
                break
            if not tarinfo.isfile():
                continue
            file_object = tar_archive.extractfile(tarinfo)
            file_like_object = IOBuffer(file_object.read())
            file_object.close()
            file_like_object.seek(0)
            try:
                dcm = pydicom.read_file(file_like_object)
            except Exception:  # not a DICOM member, keep scanning
                continue
            break
    finally:
        # The original leaked the open archive handle.
        tar_archive.close()
    if dcm is not None:
        return dcm.StudyInstanceUID
    return None
def load_study(slices, debug=False):
    """Group a list of DICOM datasets into clean image series.

    Non-image files (SR, annotations, ...) are dropped, the remaining
    slices are grouped by SeriesInstanceUID and each series is sorted by
    its z coordinate. Series with a single slice, a non-positive voxel
    size, or an ImageType other than ORIGINAL/PRIMARY are discarded.
    Every kept slice gets SliceThicknessX/Y/Z attributes.

    Args:
        slices (list): DICOM datasets (e.g. from pydicom.read_file).
        debug (bool): To print debug information.

    Returns:
        tuple: (study_uid, clean_series) — the StudyInstanceUID of the
        study (None when nothing is kept) and a list of z-sorted series.
    """
    # Keep only image files: datasets without an ImagePositionPatient
    # (SR files, annotations, ...) cannot be placed in a volume.
    clean_slices = []
    for s in slices:
        try:
            s.ImagePositionPatient[2]
            clean_slices.append(s)
        except Exception:  # narrowed from a bare `except:`
            if debug:
                print("Removing slice PatientID:{}; SOPClassUID:{}".format(s.PatientID, s.SOPClassUID))
    slices = clean_slices
    del clean_slices  # free some memory
    # Group the slices into series (sorted so equal UIDs are adjacent).
    slices.sort(key=lambda x: x.SeriesInstanceUID)
    current = None
    cidx = -1
    series = []
    for s in slices:
        if current != s.SeriesInstanceUID:
            current = s.SeriesInstanceUID
            cidx += 1
            series.append([])
        series[cidx].append(s)
    # Sort every series by z coordinate.
    for s in series:
        s.sort(key=lambda x: float(x.ImagePositionPatient[2]))
    study_uid = None
    clean_series = []
    for series_no in series:
        img = series_no[0]
        if len(series_no) > 1:
            snd = series_no[1]
            thickness_x = img.PixelSpacing[0]
            thickness_y = img.PixelSpacing[1]
            thickness_z = np.abs(img.ImagePositionPatient[2] - snd.ImagePositionPatient[2])
        else:
            # Skip series containing only one slice.
            continue
        # Skip series whose slices have no thickness.
        if np.max([thickness_x, thickness_y, thickness_z]) <= 0.0:
            continue
        image_types = img.ImageType
        # Keep only ORIGINAL (pixel values based on source data) PRIMARY
        # (direct result of the patient examination) images.
        # BUG FIX: the original wrote `not a == x and b == y`, which due to
        # operator precedence negated only the first comparison, letting
        # e.g. DERIVED/SECONDARY series through.
        if not (image_types[0] == 'ORIGINAL' and image_types[1] == 'PRIMARY'):
            continue
        for slice_no in series_no:
            slice_no.SliceThicknessX = thickness_x
            slice_no.SliceThicknessY = thickness_y
            slice_no.SliceThicknessZ = thickness_z
        # Series is fine, use it!
        clean_series.append(series_no)
        if study_uid is not None and study_uid != img.StudyInstanceUID:
            if debug:
                print("Warning: Found multiple Study Instance UIDs in directory!")
        if study_uid is None:
            study_uid = img.StudyInstanceUID
    return study_uid, clean_series
def clip_voxel_values(slices):
    """Scale the voxel values of a DICOM series into the range [0, 1].

    The raw pixel data of all slices is stacked into one volume, clamped to
    the 12-bit range [0, 4095] and then divided by 4095, so the returned
    values lie in [0, 1] (values below 0 become 0, values above 4095 become 1).

    Args:
        slices (list): A list of DICOM images.

    Returns:
        volume (numpy.array): The converted images as a float32 numpy array.
        spacing (numpy.array): The spacing of a single slice (x, y, z).
    """
    # Stack into (num_slices, rows, cols); float32 is lossless here because
    # the raw values are always well below 2**24.
    volume = np.stack([dcm.pixel_array for dcm in slices]).astype(np.float32)
    # Per-slice linear transformation from disk rep to memory rep.
    # See https://stackoverflow.com/questions/10193971/rescale-slope-and-rescale-intercept
    # And https://stackoverflow.com/questions/8756096/window-width-and-center-calculation-of-dicom-image/8765366#8765366
    for pos, dcm in enumerate(slices):
        intercept = dcm.RescaleIntercept  # read but unused: memory rep already starts at 0
        rescale_slope = dcm.RescaleSlope
        # Clamp into the 12-bit value range before normalising.
        plane = np.clip(volume[pos], 0, 4095)
        if rescale_slope != 1:
            print("WARNING:", "DICOM RescaleSlope != 1", "We should probably do something here.")
        # Divide by the maximum representable value to normalise.
        volume[pos] = plane / np.float32(4095)
    head = slices[0]
    spacing = np.array(
        [head.SliceThicknessX, head.SliceThicknessY, head.SliceThicknessZ],
        dtype=np.float32)
    return np.array(volume, dtype=np.float32), spacing
def resample_volume(vol, spacing, new_spacing, order=2):
    """Resample the given volume with the given spacing to the given new_spacing.

    Args:
        vol (numpy.array): The 3D volume as numpy array in `z` `x` `y`
        spacing (numpy.array): The spacing of the 3D volume in `x` `y` `z`
        new_spacing (numpy.array): The new spacing to resample the volume to in `x` `y` `z`
        order (int): The order of the spline interpolation

    Returns:
        volume (numpy.array): The resampled 3D volume (still in `z` `x` `y`)
        spacing (numpy.array): The spacing of the resampled 3D volume in `x` `y` `z`
    """
    # volume has order z x y; roll its shape into x y z to match `spacing`.
    vol_shape = np.roll(vol.shape, -1)
    new_shape = np.round(np.multiply(vol_shape, np.divide(spacing, new_spacing)))
    # The achievable spacing after rounding the target shape, in x y z.
    true_spacing = np.multiply(spacing, np.divide(vol_shape, new_shape))
    # BUGFIX: the per-axis zoom factors are computed in x y z order but the
    # volume axes are z x y -- roll them back so each factor hits its axis.
    resize_factor = np.roll(np.divide(new_shape, vol_shape), 1)
    # scipy.ndimage.interpolation.zoom is deprecated (removed in SciPy >= 1.10);
    # scipy.ndimage.zoom is the supported entry point.
    vol = scipy.ndimage.zoom(vol, resize_factor, mode='nearest', order=order)
    return vol, true_spacing
def decompress_case(archive_path, debug=False):
    """Decompress an archive in memory and parse its members as DICOM.

    Args:
        archive_path (str): The filepath of the archive
        debug (bool): To print debug information

    Returns:
        slices (list): The list of DICOM datasets parsed from the archive
    """
    # Pick the tar mode from the file suffix. Fall back to transparent
    # compression detection ('r:*') instead of the invalid empty mode the
    # previous version passed for unknown suffixes (which raised ValueError
    # even for a plain .tar).
    lower_path = archive_path.lower()
    if lower_path.endswith(('.gz', '.tgz')):
        mode = 'r:gz'
    elif lower_path.endswith(('.bz2', '.tbz')):
        mode = 'r:bz2'
    else:
        mode = 'r:*'
    slices = []
    if debug:
        print("Untar...")
    start = time.time()
    # Context manager guarantees the archive handle is closed even on error
    # (the previous version leaked it).
    with tarfile.open(archive_path, mode) as tar_archive:
        while True:
            tarinfo = tar_archive.next()
            if tarinfo is None:
                break
            if not tarinfo.isfile():
                continue
            file_object = tar_archive.extractfile(tarinfo)
            file_like_object = IOBuffer(file_object.read())
            file_object.close()
            file_like_object.seek(0)
            try:
                # Members that are not parseable DICOM files are skipped.
                dcm = pydicom.read_file(file_like_object)
                slices.append(dcm)
            except Exception:
                continue
    end_untar = time.time()
    if debug:
        print("Untar time: {}".format(end_untar - start))
    return slices
def load_case(path):
    """Loads all DICOM files in the given directory.

    Entries that cannot be parsed as DICOM are silently skipped.

    Args:
        path (str): The filepath of the directory

    Returns:
        slices (list): The list of loaded DICOM files
    """
    slices = []
    for entry in os.listdir(path):
        try:
            # os.path.join is portable, unlike manual '/' concatenation.
            dcm = pydicom.read_file(os.path.join(path, entry))
            slices.append(dcm)
        except Exception:
            # Not a DICOM file (or unreadable) -- skip it. `except Exception`
            # instead of bare `except` so Ctrl-C is not swallowed.
            continue
    return slices
if __name__ == '__main__':
    # NOTE: this is a bare string, not a real docstring (it is a no-op
    # statement inside the `if` block), kept for documentation value.
    """Example usage of DCMTools functions.
    This method demonstrates the usage of the provided functions to peek and load
    compressed ('gz', 'tgz', 'bz2', 'tbz') archives or decompressed DICOM files
    in a directory.
    Example:
        See `--help` for more information on how to use this example:
            $ python3 dcmtools.py --help
    """
    import argparse
    from pathlib import Path
    parser = argparse.ArgumentParser(prog="DCMTools", description="Loads DICOM files from directories and archives.")
    # need to be
    parser.add_argument("input", help="input file")
    parser.add_argument('--resolution', default=[1.0, 1.0, 1.0], nargs=3, metavar=('x', 'y', 'z'),
                        type=float, help='resampled resolution in x y z')
    parser.add_argument("--output", type=str, help="save the output file")
    parser.add_argument("--studyuid", type=str, help="filter by study uid")
    parser.add_argument("--seriesuid", type=str, help="filter by series uid")
    parser.add_argument("--debug", action='store_true',
                        default=False, help="print debug information")
    args = parser.parse_args()
    # print the usage
    parser.print_usage()
    # transform input variables
    resolution = np.array(args.resolution)
    p = Path(args.input)
    archives = []
    archive_suffixes = ['gz', 'tgz', 'bz2', 'tbz']
    if p.is_dir():
        # try to find a file with an archive suffix in the folder
        try:
            archives = [item for sblst in [list(p.glob('*.' + sffx)) for sffx in archive_suffixes] for item in sblst]
        except:
            pass
    # check if input file is already a compressed archive
    if p.exists() and not p.is_dir():
        # it's a file!
        if p.suffix[1:] in archive_suffixes:
            archives.append(p)
    # peek all study id's (cheap: reads only the first DICOM header of each)
    for idx, arch in enumerate(archives):
        arch_path = str(arch)
        print("Found a *.tbz archive:", arch_path)
        study_uid = peek_compressed_study(arch_path)
        print("StudyInstanceUID: {}".format(study_uid))
    # if at least one file is found,
    # we can decompress it and get suid and slice count
    if len(archives):
        arch_path = str(archives[0])
        print("Found a *.tbz archive:", arch_path)
        study_uid = peek_compressed_study(arch_path)
        print("StudyInstanceUID: {}".format(study_uid))
        dcm_slices = decompress_case(arch_path)
        print("Decompressed file {} with {} slices.".format(arch_path, len(dcm_slices)))
        exit(0)
    # no compressed archives found,
    # so the provided input must be a directory of decompressed files
    case_path = str(p)
    print("Found DICOM directory:", case_path)
    # peek directory to get study id
    study_uid = peek_study(case_path)
    print("StudyInstanceUID: {}".format(study_uid))
    dcm_slices = load_case(case_path)
    study_uid, clean_series = load_study(dcm_slices)
    print("Found {} series with study uid ({}) in directory '{}':".format(len(clean_series), study_uid, case_path))
    series = None
    # print a summary line per usable series; `series` keeps the last one
    for series in clean_series:
        img = series[0]
        print("{} {} ({}): Dimension ({}/{}/{}) Thickness ({}/{}/{}:{}) ImageType ({}) SOPClassUID ({})".format(
            img.SeriesInstanceUID, img.StudyDescription, img.SeriesDescription,
            img.Rows, img.Columns, len(series),
            img.SliceThicknessX, img.SliceThicknessY, img.SliceThicknessZ,
            img.SpacingBetweenSlices,
            img.ImageType, img.SOPClassUID
        ))
    if series:
        print("Load last seen series...")
        # untouched voxel values, normalised to [0, 1]
        case_voxels, spacing = clip_voxel_values(series)
        # resampled to the requested resolution
        resampled, data_spacing = resample_volume(case_voxels, spacing, resolution, order=1)
        print("Done!")
|
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for providers.kubernetes.kubernetes_virtual_machine."""
# pylint: disable=not-context-manager
import json
import unittest
import contextlib2
import mock
from perfkitbenchmarker import flags as flgs
from perfkitbenchmarker import os_types
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.kubernetes import kubernetes_pod_spec
from perfkitbenchmarker.providers.kubernetes import kubernetes_virtual_machine
from tests import pkb_common_test_case
from six.moves import builtins
FLAGS = flgs.FLAGS
FLAGS.kubernetes_anti_affinity = False
_COMPONENT = 'test_component'
_RUN_URI = 'fake_run_uri'
_NAME = 'fake_name'
_KUBECTL = 'fake_kubectl_path'
_KUBECONFIG = 'fake_kubeconfig_path'
_EXPECTED_CALL_BODY_WITHOUT_GPUS = """
{
"spec": {
"dnsPolicy":
"ClusterFirst",
"volumes": [],
"containers": [{
"name": "fake_name",
"workingDir": "/root",
"volumeMounts": [],
"image": "test_image",
"securityContext": {
"privileged": true
}
}]
},
"kind": "Pod",
"metadata": {
"name": "fake_name",
"labels": {
"pkb": "fake_name"
}
},
"apiVersion": "v1"
}
"""
_EXPECTED_CALL_BODY_WITH_2_GPUS = """
{
"spec": {
"dnsPolicy":
"ClusterFirst",
"volumes": [],
"containers": [{
"name": "fake_name",
"volumeMounts": [],
"workingDir": "/root",
"image": "test_image",
"securityContext": {
"privileged": true
},
"resources" : {
"limits": {
"nvidia.com/gpu": "2"
},
"requests": {
"nvidia.com/gpu": "2"
}
}
}]
},
"kind": "Pod",
"metadata": {
"name": "fake_name",
"labels": {
"pkb": "fake_name"
}
},
"apiVersion": "v1"
}
"""
_EXPECTED_CALL_BODY_WITH_NVIDIA_CUDA_IMAGE = """
{
"spec": {
"dnsPolicy":
"ClusterFirst",
"volumes": [],
"containers": [{
"name": "fake_name",
"volumeMounts": [],
"workingDir": "/root",
"image": "nvidia/cuda:9.0-devel-ubuntu16.04",
"securityContext": {
"privileged": true
},
"command": [
"bash",
"-c",
"apt-get update && apt-get install -y sudo && sed -i '/env_reset/d' /etc/sudoers && sed -i '/secure_path/d' /etc/sudoers && sudo ldconfig && tail -f /dev/null"
]
}]
},
"kind": "Pod",
"metadata": {
"name": "fake_name",
"labels": {
"pkb": "fake_name"
}
},
"apiVersion": "v1"
}
"""
def get_write_mock_from_temp_file_mock(temp_file_mock):
    """Returns the write method mock from the NamedTemporaryFile mock.

    This can be used to make assertions about the calls made to write(),
    which exists on the instance returned from the NamedTemporaryFile mock.
    The __enter__() step is needed because kubernetes_helper.py uses
    NamedTemporaryFile as a context manager.

    Args:
        temp_file_mock: mock object of the NamedTemporaryFile() contextManager
    """
    context_manager = temp_file_mock()
    temp_file_instance = context_manager.__enter__()
    return temp_file_instance.write
@contextlib2.contextmanager
def patch_critical_objects(stdout='', stderr='', return_code=0, flags=FLAGS):
    """Patches builtins.open and vm_util helpers so no real commands run.

    IssueCommand is stubbed to return (stdout, stderr, return_code).

    Yields:
        (issue_command, temp_file): the IssueCommand and NamedTemporaryFile
        mocks, for asserting on issued commands and written pod.yml bodies.
    """
    with contextlib2.ExitStack() as stack:
        retval = (stdout, stderr, return_code)
        # Deterministic flag values so command strings are predictable.
        flags.gcloud_path = 'gcloud'
        flags.run_uri = _RUN_URI
        flags.kubectl = _KUBECTL
        flags.kubeconfig = _KUBECONFIG
        stack.enter_context(mock.patch(builtins.__name__ + '.open'))
        stack.enter_context(mock.patch(vm_util.__name__ + '.PrependTempDir'))
        # Save and return the temp_file mock here so that we can access the write()
        # call on the instance that the mock returned. This allows us to verify
        # that the body of the file is what we expect it to be (useful for
        # verifying that the pod.yml body was written correctly).
        temp_file = stack.enter_context(
            mock.patch(vm_util.__name__ + '.NamedTemporaryFile'))
        issue_command = stack.enter_context(
            mock.patch(vm_util.__name__ + '.IssueCommand', return_value=retval))
        yield issue_command, temp_file
class BaseKubernetesVirtualMachineTestCase(
        pkb_common_test_case.PkbCommonTestCase):
    """Shared base providing JSON-document equality assertions."""

    def assertJsonEqual(self, str1, str2):
        """Asserts that two JSON strings encode the same document."""
        parsed_first = json.loads(str1)
        parsed_second = json.loads(str2)
        # Canonicalise both documents before comparing so key order and
        # whitespace differences do not matter.
        self.assertEqual(
            json.dumps(parsed_first, sort_keys=True),
            json.dumps(parsed_second, sort_keys=True)
        )
class KubernetesResourcesTestCase(
        BaseKubernetesVirtualMachineTestCase):
    """Tests for pod resource limits, requests and their metadata."""

    @staticmethod
    def create_virtual_machine_spec():
        """Builds a pod spec with explicit CPU/memory limits, requests and GPUs."""
        return kubernetes_pod_spec.KubernetesPodSpec(
            _COMPONENT,
            resource_limits={
                'cpus': 2,
                'memory': '5GiB'
            },
            resource_requests={
                'cpus': 1.5,
                'memory': '4GiB'
            },
            gpu_count=2,
            gpu_type='k80',
        )

    def testPodResourceLimits(self):
        # '5GiB' must be normalised to megabytes by the spec.
        pod_spec = self.create_virtual_machine_spec()
        self.assertEqual(pod_spec.resource_limits.cpus, 2)
        self.assertEqual(pod_spec.resource_limits.memory, 5120)

    def testCreatePodResourceBody(self):
        pod_spec = self.create_virtual_machine_spec()
        with patch_critical_objects():
            kub_vm = kubernetes_virtual_machine.KubernetesVirtualMachine(pod_spec)
            expected_body = {
                'limits': {
                    'cpu': '2',
                    'memory': '5120Mi',
                    'nvidia.com/gpu': '2'
                },
                'requests': {
                    'cpu': '1.5',
                    'memory': '4096Mi',
                    'nvidia.com/gpu': '2'
                }
            }
            self.assertDictEqual(expected_body, kub_vm._BuildResourceBody())

    def testGetMetadata(self):
        pod_spec = self.create_virtual_machine_spec()
        with patch_critical_objects():
            kub_vm = kubernetes_virtual_machine.KubernetesVirtualMachine(pod_spec)
            expected_subset = {
                'pod_cpu_limit': 2,
                'pod_memory_limit_mb': 5120,
                'pod_cpu_request': 1.5,
                'pod_memory_request_mb': 4096,
            }
            self.assertDictContainsSubset(expected_subset,
                                          kub_vm.GetResourceMetadata())
class KubernetesVirtualMachineOsTypesTestCase(
        BaseKubernetesVirtualMachineTestCase):
    """Checks that each supported os_type maps to the right container image."""

    @staticmethod
    def create_kubernetes_vm(os_type):
        """Creates a Kubernetes VM of the given os_type and returns it.

        Fix: the factory previously built the VM but discarded it; it now
        returns the instance (backward compatible -- existing callers ignore
        the return value) so tests can also assert on the VM itself.
        """
        spec = kubernetes_pod_spec.KubernetesPodSpec(
            _COMPONENT)
        vm_class = virtual_machine.GetVmClass(providers.KUBERNETES,
                                              os_type)
        kub_vm = vm_class(spec)
        # The mocked kubectl never reports a booted pod; skip the wait.
        kub_vm._WaitForPodBootCompletion = lambda: None
        kub_vm._Create()
        return kub_vm

    def testUbuntuImagesInstallSudo(self):
        # Ubuntu images must prepend a bootstrap command that installs sudo
        # and keeps the container alive.
        with patch_critical_objects() as (_, temp_file):
            self.create_kubernetes_vm(os_types.UBUNTU1604)

            write_mock = get_write_mock_from_temp_file_mock(temp_file)
            create_json = json.loads(write_mock.call_args[0][0])
            command = create_json['spec']['containers'][0]['command']
            self.assertEqual(command,
                             [u'bash', u'-c',
                              (u'apt-get update && apt-get install -y sudo && '
                               'sed -i \'/env_reset/d\' /etc/sudoers && '
                               'sed -i \'/secure_path/d\' /etc/sudoers && '
                               'sudo ldconfig && tail -f /dev/null')])

    def testCreateUbuntu1604(self):
        with patch_critical_objects() as (_, temp_file):
            self.create_kubernetes_vm(os_types.UBUNTU1604)

            write_mock = get_write_mock_from_temp_file_mock(temp_file)
            create_json = json.loads(write_mock.call_args[0][0])
            self.assertEqual(create_json['spec']['containers'][0]['image'],
                             'ubuntu:16.04')

    def testCreateUbuntu1710(self):
        with patch_critical_objects() as (_, temp_file):
            self.create_kubernetes_vm(os_types.UBUNTU1710)

            write_mock = get_write_mock_from_temp_file_mock(temp_file)
            create_json = json.loads(write_mock.call_args[0][0])
            self.assertEqual(create_json['spec']['containers'][0]['image'],
                             'ubuntu:17.10')
class KubernetesVirtualMachineTestCase(
        BaseKubernetesVirtualMachineTestCase):
    """Covers pod creation and preprovisioned-data downloads per cloud."""

    @staticmethod
    def create_virtual_machine_spec():
        """Builds a minimal pod spec with a fixed test image."""
        return kubernetes_pod_spec.KubernetesPodSpec(
            _COMPONENT,
            image='test_image',
            install_packages=False,
            machine_type='test_machine_type',
            zone='test_zone')

    def _create_vm(self, spec):
        """Instantiates the VM, disables the boot wait and runs _Create()."""
        kub_vm = kubernetes_virtual_machine.KubernetesVirtualMachine(spec)
        kub_vm._WaitForPodBootCompletion = lambda: None
        return kub_vm

    def testCreate(self):
        pod_spec = self.create_virtual_machine_spec()
        with patch_critical_objects() as (issue_command, _):
            kub_vm = self._create_vm(pod_spec)
            kub_vm._Create()
            invocation = issue_command.call_args[0][0]
            command_prefix = ' '.join(invocation[:4])
            # Exactly one kubectl invocation, and it must be a `create -f`.
            self.assertEqual(issue_command.call_count, 1)
            self.assertIn('{0} --kubeconfig={1} create -f'.format(
                _KUBECTL, _KUBECONFIG), command_prefix)

    def testCreatePodBodyWrittenCorrectly(self):
        pod_spec = self.create_virtual_machine_spec()
        with patch_critical_objects() as (_, temp_file):
            kub_vm = self._create_vm(pod_spec)
            # The BaseVm instance counter is unpredictable within a shared PKB
            # test run, so pin the name explicitly before creating.
            kub_vm.name = _NAME
            kub_vm._Create()

            write_mock = get_write_mock_from_temp_file_mock(temp_file)
            self.assertJsonEqual(
                write_mock.call_args[0][0],
                _EXPECTED_CALL_BODY_WITHOUT_GPUS
            )

    def _download_command_string(self, cloud):
        """Runs DownloadPreprovisionedData under *cloud*; returns the command."""
        pod_spec = self.create_virtual_machine_spec()
        FLAGS.container_cluster_cloud = cloud
        with patch_critical_objects(flags=FLAGS) as (issue_command, _):
            kub_vm = (
                kubernetes_virtual_machine.Ubuntu1604BasedKubernetesVirtualMachine(
                    pod_spec))
            kub_vm.DownloadPreprovisionedData('path', 'name', 'filename')
            return ' '.join(issue_command.call_args[0][0])

    def testDownloadPreprovisionedDataAws(self):
        self.assertIn('s3', self._download_command_string('AWS'))

    def testDownloadPreprovisionedDataAzure(self):
        self.assertIn('az storage blob download',
                      self._download_command_string('Azure'))

    def testDownloadPreprovisionedDataGcp(self):
        self.assertIn('gsutil', self._download_command_string('GCP'))
class KubernetesVirtualMachineWithGpusTestCase(
        BaseKubernetesVirtualMachineTestCase):
    """Same creation paths as above, but with GPU resources in the spec."""

    @staticmethod
    def create_virtual_machine_spec():
        """Builds a pod spec requesting two k80 GPUs."""
        return kubernetes_pod_spec.KubernetesPodSpec(
            _COMPONENT,
            image='test_image',
            gpu_count=2,
            gpu_type='k80',
            install_packages=False,
            machine_type='test_machine_type',
            zone='test_zone')

    def testCreate(self):
        pod_spec = self.create_virtual_machine_spec()
        with patch_critical_objects() as (issue_command, _):
            kub_vm = kubernetes_virtual_machine.KubernetesVirtualMachine(pod_spec)
            kub_vm._WaitForPodBootCompletion = lambda: None
            kub_vm._Create()
            invocation = issue_command.call_args[0][0]
            command_prefix = ' '.join(invocation[:4])
            self.assertEqual(issue_command.call_count, 1)
            self.assertIn('{0} --kubeconfig={1} create -f'.format(
                _KUBECTL, _KUBECONFIG), command_prefix)

    def testCreatePodBodyWrittenCorrectly(self):
        pod_spec = self.create_virtual_machine_spec()
        with patch_critical_objects() as (_, temp_file):
            kub_vm = kubernetes_virtual_machine.KubernetesVirtualMachine(pod_spec)
            # Pin the name: the shared BaseVm instance counter makes the
            # auto-generated one unpredictable.
            kub_vm.name = _NAME
            kub_vm._WaitForPodBootCompletion = lambda: None
            kub_vm._Create()

            write_mock = get_write_mock_from_temp_file_mock(temp_file)
            self.assertJsonEqual(
                write_mock.call_args[0][0],
                _EXPECTED_CALL_BODY_WITH_2_GPUS
            )
class KubernetesVirtualMachineWithNvidiaCudaImage(
        BaseKubernetesVirtualMachineTestCase):
    """Verifies the pod body produced for the CUDA-enabled os_type."""

    @staticmethod
    def create_virtual_machine_spec():
        """Builds a pod spec without an explicit image (os_type supplies it)."""
        return kubernetes_pod_spec.KubernetesPodSpec(
            _COMPONENT,
            install_packages=False,
            machine_type='test_machine_type',
            zone='test_zone')

    def testCreatePodBodyWrittenCorrectly(self):
        pod_spec = self.create_virtual_machine_spec()
        cuda_vm_class = virtual_machine.GetVmClass(providers.KUBERNETES,
                                                   os_types.UBUNTU1604_CUDA9)
        with patch_critical_objects() as (_, temp_file):
            kub_vm = cuda_vm_class(pod_spec)
            # Pin the name: the shared BaseVm instance counter makes the
            # auto-generated one unpredictable.
            kub_vm.name = _NAME
            kub_vm._WaitForPodBootCompletion = lambda: None
            kub_vm._Create()

            write_mock = get_write_mock_from_temp_file_mock(temp_file)
            self.assertJsonEqual(
                write_mock.call_args[0][0],
                _EXPECTED_CALL_BODY_WITH_NVIDIA_CUDA_IMAGE
            )
# Allow running this test module directly (outside the PKB test runner).
if __name__ == '__main__':
    unittest.main()
|
import torch
import torch.nn as nn
class StackedLSTM(nn.Module):
    """
    Our own implementation of stacked LSTM.
    Needed for the decoder, because we do input feeding.
    """

    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedLSTM, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        # First cell consumes the input features, subsequent cells consume
        # the hidden state of the layer below.
        in_features = input_size
        for _ in range(num_layers):
            self.layers.append(nn.LSTMCell(in_features, rnn_size))
            in_features = rnn_size

    def forward(self, input, hidden):
        prev_h, prev_c = hidden
        next_h, next_c = [], []
        feed = input
        for depth, cell in enumerate(self.layers):
            h_i, c_i = cell(feed, (prev_h[depth], prev_c[depth]))
            feed = h_i
            # Apply dropout between layers only, never after the top layer.
            if depth + 1 != self.num_layers:
                feed = self.dropout(feed)
            next_h.append(h_i)
            next_c.append(c_i)
        # Return the top layer's output plus stacked per-layer states.
        return feed, (torch.stack(next_h), torch.stack(next_c))
class StackedGRU(nn.Module):
    """Stacked GRU cells with inter-layer dropout (GRU analogue of StackedLSTM)."""

    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedGRU, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        in_features = input_size
        for _ in range(num_layers):
            self.layers.append(nn.GRUCell(in_features, rnn_size))
            in_features = rnn_size

    def forward(self, input, hidden):
        next_h = []
        feed = input
        for depth, cell in enumerate(self.layers):
            h_i = cell(feed, hidden[0][depth])
            feed = h_i
            # Dropout between layers only, never after the top layer.
            if depth + 1 != self.num_layers:
                feed = self.dropout(feed)
            next_h.append(h_i)
        # Hidden state is returned as a one-element tuple, mirroring the
        # (h, c) shape convention used by StackedLSTM.
        return feed, (torch.stack(next_h),)
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Websocket proxy that is compatible with OpenStack Nova
Serial consoles. Leverages websockify.py by Joel Martin.
Based on nova-novncproxy.
"""
import os
import sys
from oslo.config import cfg
from nova import config
from nova.console import websocketproxy
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import version
# Command-line options for the serial console proxy listener endpoint.
opts = [
    cfg.StrOpt('serialproxy_host',
               default='0.0.0.0',
               help='Host on which to listen for incoming requests'),
    cfg.IntOpt('serialproxy_port',
               default=6083,
               help='Port on which to listen for incoming requests'),
]
CONF = cfg.CONF
CONF.register_cli_opts(opts, group="serial_console")
# Reuse options already registered by the noVNC proxy / logging modules.
CONF.import_opt('debug', 'nova.openstack.common.log')
CONF.import_opt('record', 'nova.cmd.novnc')
CONF.import_opt('daemon', 'nova.cmd.novnc')
CONF.import_opt('ssl_only', 'nova.cmd.novnc')
CONF.import_opt('source_is_ipv6', 'nova.cmd.novnc')
CONF.import_opt('cert', 'nova.cmd.novnc')
CONF.import_opt('key', 'nova.cmd.novnc')
def exit_with_error(msg, errno=-1):
    """Print ``msg`` and terminate the process with exit code ``errno``.

    Fix: the previous body was ``print(msg) and sys.exit(errno)`` -- since
    ``print`` returns ``None``, the ``and`` short-circuited and ``sys.exit``
    was never called, so the process kept running after the error message.
    """
    print(msg)
    sys.exit(errno)
def main():
    """Entry point: parse flags, set up logging, start the websocket proxy."""
    # Setup flags
    config.parse_args(sys.argv)

    # Fail fast if SSL was requested but the certificate file is missing.
    if CONF.ssl_only and not os.path.exists(CONF.cert):
        exit_with_error("SSL only and %s not found" % CONF.cert)

    logging.setup("nova")
    gmr.TextGuruMeditation.setup_autorun(version)

    # Create and start the NovaWebSockets proxy
    # NOTE(review): CONF.verbose is assumed to be registered elsewhere
    # (oslo logging/novnc defaults) -- confirm it exists in this tree.
    server = websocketproxy.NovaWebSocketProxy(
        listen_host=CONF.serial_console.serialproxy_host,
        listen_port=CONF.serial_console.serialproxy_port,
        source_is_ipv6=CONF.source_is_ipv6,
        verbose=CONF.verbose,
        cert=CONF.cert,
        key=CONF.key,
        ssl_only=CONF.ssl_only,
        daemon=CONF.daemon,
        record=CONF.record,
        # Only dump traffic when verbose and running in the foreground.
        traffic=CONF.verbose and not CONF.daemon,
        file_only=True,
        RequestHandlerClass=websocketproxy.NovaProxyRequestHandler)
    server.start_server()
|
"""
IPMI control of CPU temperatures.
"""
import re
from typing import Dict
from .controller_state import ControllerState
from .cpu_sensor import CpuSensor
from .ipmitool import Ipmitool
from .util import parse_hex
class IpmiCpu:
    """
    IPMI control of CPU temperatures.
    """

    ipmitool: Ipmitool
    # Matches the leading integer of an sdr value field, e.g. "45 degrees C".
    pat_integer = re.compile(r'^(\d+)')

    def __init__(self, host: str, username: str, password: str) -> None:
        self.ipmitool = Ipmitool(host, username, password)

    def _leading_int(self, text: str):
        """Return the leading integer of *text*, or None if there is none."""
        matched = self.pat_integer.match(text)
        if matched is None:
            return None
        return int(matched.groups()[0])

    def discover_sensors(self, state: ControllerState) -> None:
        """
        Query IPMI for list of CPUs.
        Must call this method first before using this class.
        """
        rows = self.ipmitool.sdr_type('temperature')
        # Filter CPU temp sensors: only rows literally named 'Temp'.
        cpu_map: Dict[str, CpuSensor] = {}
        for row in rows:
            name = row[0]
            if name != 'Temp':
                continue
            sensor = CpuSensor()
            sensor.name = name
            sensor.id = parse_hex(row[1])
            # CPU temperature (left unchanged when the field is not numeric).
            temp = self._leading_int(row[4])
            if temp is not None:
                sensor.temp = temp
            print(f'Found CPU temperature sensor: {name} ({sensor.id:#x})')
            cpu_map[f'{name} ({sensor.id:#x})'] = sensor
        state.cpu_map = cpu_map
        self.dump_sensors(state)

    def read_sensors(self, state: ControllerState) -> None:
        """
        Read current sensor values.
        Store values in state.
        """
        for row in self.ipmitool.sdr_type('temperature'):
            if len(row) < 2:
                continue
            key = f'{row[0]} ({parse_hex(row[1]):#x})'
            sensor = state.cpu_map.get(key)
            if sensor is None:
                continue
            # CPU temperature (left unchanged when the field is not numeric).
            temp = self._leading_int(row[4])
            if temp is not None:
                sensor.temp = temp
        self.dump_sensors(state)

    def dump_sensors(self, state: ControllerState) -> None:
        """
        Dump sensors to console.
        """
        for key in sorted(state.cpu_map.keys()):
            print(state.cpu_map[key])

    def get_max_cpu_temp(self, state: ControllerState) -> float:
        """
        Get maximum of CPU temps (floored at 0.0, also for an empty map).
        """
        return max([0.0] + [sensor.temp for sensor in state.cpu_map.values()])
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Main command-line interface to PyInstaller.
"""
import os
import argparse
import platform
from . import __version__
from . import log as logging
# note: don't import anything else until this function is run!
from .compat import check_requirements, is_conda
logger = logging.getLogger(__name__)
# Taken from https://stackoverflow.com/a/22157136 to format args more flexibly:
# any help text which beings with ``R|`` will have all newlines preserved; the
# help text will be line wrapped. See
# https://docs.python.org/3/library/argparse.html#formatter-class.
#
# This is used by the ``--debug`` option.
class _SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
# The underlying implementation of ``RawTextHelpFormatter._split_lines``
# invokes this; mimic it.
return text[2:].splitlines()
else:
# Invoke the usual formatter.
return super(_SmartFormatter, self)._split_lines(text, width)
def run_makespec(filenames, **opts):
    """Generate a .spec file for *filenames* and return its path."""
    # Split every pathex entry on the platform's path separator and flatten.
    opts['pathex'] = [
        part
        for entry in opts['pathex']
        for part in entry.split(os.pathsep)
    ]

    # Deferred import: only load the build machinery when actually needed.
    import PyInstaller.building.makespec

    spec_file = PyInstaller.building.makespec.main(filenames, **opts)
    logger.info('wrote %s' % spec_file)
    return spec_file
def run_build(pyi_config, spec_file, **kwargs):
    """Build the bundle described by *spec_file* (thin wrapper)."""
    # Deferred import keeps module import cheap; alias for readability.
    from PyInstaller.building.build_main import main as build_main
    build_main(pyi_config, spec_file, **kwargs)
def __add_options(parser):
    """Attach the top-level -v/--version option to *parser*."""
    parser.add_argument(
        '-v', '--version',
        action='version',
        version=__version__,
        help='Show program version info and exit.',
    )
def run(pyi_args=None, pyi_config=None):
    """
    pyi_args allows running PyInstaller programatically without a subprocess
    pyi_config allows checking configuration once when running multiple tests
    """
    check_requirements()

    import PyInstaller.building.makespec
    import PyInstaller.building.build_main
    import PyInstaller.log

    try:
        # Compose the CLI from every subsystem's option group.
        parser = argparse.ArgumentParser(formatter_class=_SmartFormatter)
        __add_options(parser)
        PyInstaller.building.makespec.__add_options(parser)
        PyInstaller.building.build_main.__add_options(parser)
        PyInstaller.log.__add_options(parser)
        parser.add_argument('filenames', metavar='scriptname', nargs='+',
                            help=("name of scriptfiles to be processed or "
                                  "exactly one .spec-file. If a .spec-file is "
                                  "specified, most options are unnecessary "
                                  "and are ignored."))

        args = parser.parse_args(pyi_args)
        # Apply log-level options before the first logger call below.
        PyInstaller.log.__process_options(parser, args)

        # Print PyInstaller version, Python version and platform
        # as the first line to stdout.
        # This helps identify PyInstaller, Python and platform version
        # when users report issues.
        logger.info('PyInstaller: %s' % __version__)
        logger.info('Python: %s%s', platform.python_version(),
                    " (conda)" if is_conda else "")
        logger.info('Platform: %s' % platform.platform())

        # Skip creating .spec when .spec file is supplied
        if args.filenames[0].endswith('.spec'):
            spec_file = args.filenames[0]
        else:
            spec_file = run_makespec(**vars(args))

        run_build(pyi_config, spec_file, **vars(args))

    except KeyboardInterrupt:
        raise SystemExit("Aborted by user request.")
    except RecursionError:
        # Replace the bare traceback with an actionable hint message.
        from . import _recursion_to_deep_message
        _recursion_to_deep_message.raise_with_msg()
# Allow `python -m PyInstaller.__main__`-style direct execution.
if __name__ == '__main__':
    run()
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Make sure the settings module is configured before Django is imported.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangographene.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)
# Standard Django manage.py entry point.
if __name__ == '__main__':
    main()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write an inference graph for separation models.
Example usage, using tf.estimator compatible functions:
def input_fn(params):
...
def model_fn(features, labels, mode, params):
...
inference_graph.write_inference_graph(model_fn, input_fn, params)
"""
import copy
import os
import tensorflow.compat.v1 as tf
# NOTE(review): eager execution is enabled globally at import time even
# though write() below builds a legacy Graph; tf.Graph().as_default() still
# works under eager mode, but confirm this global toggle is intentional.
tf.enable_eager_execution()
def write(model_fn, input_fn, params, directory):
    """Writes an inference graph.

    Builds the model in PREDICT mode inside a fresh graph and writes both
    'inference.pbtxt' (GraphDef) and 'inference.meta' (MetaGraph) into
    *directory*.

    Args:
        model_fn: tf.estimator-compatible model function.
        input_fn: tf.estimator-compatible input function.
        params: dict of hyperparameters; copied, not mutated.
        directory: output directory for the graph files.
    """
    # Inference-specific copies of params: batch size 1, and an 'inference'
    # marker flag for the input pipeline.
    input_fn_params = copy.deepcopy(params)
    input_fn_params['inference'] = True
    input_fn_params['batch_size'] = 1
    model_fn_params = copy.deepcopy(params)
    model_fn_params['batch_size'] = 1
    with tf.Graph().as_default() as graph:
        features = input_fn(input_fn_params)
        model_fn(features=features,
                 labels=None,
                 mode=tf.estimator.ModeKeys.PREDICT,
                 params=model_fn_params)
        # Instantiating a Saver adds save/restore ops to the graph so the
        # exported meta graph can later be restored with variables.
        tf.train.Saver()
        graph_def = graph.as_graph_def(add_shapes=True)
        tf.train.write_graph(graph_def, directory, 'inference.pbtxt')
        meta_graph_name = os.path.join(directory, 'inference.meta')
        tf.train.export_meta_graph(filename=meta_graph_name)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@proxy_wasm_cpp_host//bazel/cargo:crates.bzl", "proxy_wasm_cpp_host_fetch_remote_crates")
load("@rules_rust//rust:repositories.bzl", "rust_repositories")
def proxy_wasm_cpp_host_dependencies():
    """Declares the repositories proxy-wasm-cpp-host needs.

    Registers the Rust toolchain rules and fetches the pinned remote crates
    declared in the generated cargo crates file.
    """
    rust_repositories()
    proxy_wasm_cpp_host_fetch_remote_crates()
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Utb Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.blocktools import get_masternode_payment, create_coinbase, create_block
from test_framework.mininode import *
from test_framework.test_framework import UtbTestFramework
from test_framework.util import *
from time import *
'''
llmq-is-cl-conflicts.py
Checks conflict handling between ChainLocks and InstantSend
'''
class TestNode(SingleNodeConnCB):
    """Bare P2P peer that can announce hand-crafted CLSIG/ISLOCK messages.

    Payloads are registered under their inv hash when "sent" and are served
    back to the node when it answers the inv with a getdata request.
    """
    def __init__(self):
        SingleNodeConnCB.__init__(self)
        self.clsigs = {}   # inv hash -> msg_clsig
        self.islocks = {}  # inv hash -> msg_islock

    def send_clsig(self, clsig):
        """Announce a ChainLock signature via an inv message."""
        # Local renamed from `hash` to avoid shadowing the builtin hash().
        inv_hash = uint256_from_str(hash256(clsig.serialize()))
        self.clsigs[inv_hash] = clsig
        self.send_message(msg_inv([CInv(29, inv_hash)]))  # 29: CLSIG inv type

    def send_islock(self, islock):
        """Announce an InstantSend lock via an inv message."""
        inv_hash = uint256_from_str(hash256(islock.serialize()))
        self.islocks[inv_hash] = islock
        self.send_message(msg_inv([CInv(30, inv_hash)]))  # 30: ISLOCK inv type

    def on_getdata(self, conn, message):
        """Serve previously registered payloads requested via getdata."""
        for inv in message.inv:
            if inv.hash in self.clsigs:
                self.send_message(self.clsigs[inv.hash])
            if inv.hash in self.islocks:
                self.send_message(self.islocks[inv.hash])
class LLMQ_IS_CL_Conflicts(UtbTestFramework):
    """Functional test for conflict handling between ChainLocks and InstantSend.

    Verifies that a ChainLock (CLSIG) overrides conflicting InstantSend locks
    (ISLOCKs), and that an ISLOCK can invalidate already-mined blocks that
    are not yet chainlocked.
    """
    def __init__(self):
        # 6 nodes, 5 of them masternodes; DIP3 enforced from the start.
        super().__init__(6, 5, [], fast_dip3_enforcement=True)
        #disable_mocktime()

    def run_test(self):
        # Mine until the dip0008 softfork (required for ChainLocks) is active.
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(10)
        sync_blocks(self.nodes, timeout=60*5)
        # Attach a raw P2P connection used to inject hand-crafted CLSIGs/ISLOCKs.
        self.test_node = TestNode()
        self.test_node.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
        NetworkThread().start() # Start up network handling in another thread
        self.test_node.wait_for_verack()
        # Enable DKG, ChainLocks and LLMQ-based InstantSend via sporks.
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.nodes[0].spork("SPORK_20_INSTANTSEND_LLMQ_BASED", 0)
        self.wait_for_sporks_same()
        self.mine_quorum()
        # mine single block, wait for chainlock
        self.nodes[0].generate(1)
        self.wait_for_chainlock_tip_all_nodes()
        self.test_chainlock_overrides_islock(False)
        self.test_chainlock_overrides_islock(True)
        self.test_islock_overrides_nonchainlock()

    def test_chainlock_overrides_islock(self, test_block_conflict):
        """A CLSIG for a block containing tx2 must prune ISLOCKs on the
        conflicting tx1 (and its child tx4). When test_block_conflict is True,
        the block is first submitted while the ISLOCKs are still active and
        must be rejected with "conflict-tx-lock"."""
        # create three raw TXs, they will conflict with each other
        rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx3 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx1_obj = FromHex(CTransaction(), rawtx1)
        rawtx2_obj = FromHex(CTransaction(), rawtx2)
        rawtx3_obj = FromHex(CTransaction(), rawtx3)
        # Only tx1 is broadcast; tx2/tx3 txids are computed locally.
        # NOTE(review): rawtx3/rawtx3_txid appear unused below.
        rawtx1_txid = self.nodes[0].sendrawtransaction(rawtx1)
        rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
        rawtx3_txid = encode(hash256(hex_str_to_bytes(rawtx3))[::-1], 'hex_codec').decode('ascii')
        # Create a chained TX on top of tx1
        inputs = []
        n = 0
        for out in rawtx1_obj.vout:
            if out.nValue == 100000000:
                inputs.append({"txid": rawtx1_txid, "vout": n})
            n += 1
        rawtx4 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
        rawtx4 = self.nodes[0].signrawtransaction(rawtx4)['hex']
        rawtx4_txid = self.nodes[0].sendrawtransaction(rawtx4)
        # Both tx1 and its child must be instant-locked everywhere first.
        for node in self.nodes:
            self.wait_for_instantlock(rawtx1_txid, node)
            self.wait_for_instantlock(rawtx4_txid, node)
        # Mine the conflicting tx2 into a block (not submitted yet).
        block = self.create_block(self.nodes[0], [rawtx2_obj])
        if test_block_conflict:
            submit_result = self.nodes[0].submitblock(ToHex(block))
            assert(submit_result == "conflict-tx-lock")
        # Craft and inject a ChainLock for the block containing tx2.
        cl = self.create_chainlock(self.nodes[0].getblockcount() + 1, block.sha256)
        self.test_node.send_clsig(cl)
        # Give the CLSIG some time to propagate. We unfortunately can't check propagation here as "getblock/getblockheader"
        # is required to check for CLSIGs, but this requires the block header to be propagated already
        sleep(1)
        # The block should get accepted now, and at the same time prune the conflicting ISLOCKs
        submit_result = self.nodes[1].submitblock(ToHex(block))
        if test_block_conflict:
            assert(submit_result == "duplicate")
        else:
            assert(submit_result is None)
        for node in self.nodes:
            self.wait_for_chainlock(node, "%064x" % block.sha256)
        # Create a chained TX on top of tx2
        inputs = []
        n = 0
        for out in rawtx2_obj.vout:
            if out.nValue == 100000000:
                inputs.append({"txid": rawtx2_txid, "vout": n})
            n += 1
        rawtx5 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
        rawtx5 = self.nodes[0].signrawtransaction(rawtx5)['hex']
        rawtx5_txid = self.nodes[0].sendrawtransaction(rawtx5)
        for node in self.nodes:
            self.wait_for_instantlock(rawtx5_txid, node)
        # Lets verify that the ISLOCKs got pruned
        for node in self.nodes:
            assert_raises_jsonrpc(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx1_txid, True)
            assert_raises_jsonrpc(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx4_txid, True)
            rawtx = node.getrawtransaction(rawtx2_txid, True)
            # tx2 is locked by the chainlock, not by its own ISLOCK.
            assert(rawtx['chainlock'])
            assert(rawtx['instantlock'])
            assert(not rawtx['instantlock_internal'])

    def test_islock_overrides_nonchainlock(self):
        """An ISLOCK arriving after a conflicting (but not chainlocked) block
        was mined must invalidate that block and reorg back to good_tip."""
        # create two raw TXs, they will conflict with each other
        rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx1_txid = encode(hash256(hex_str_to_bytes(rawtx1))[::-1], 'hex_codec').decode('ascii')
        rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
        # Create an ISLOCK but don't broadcast it yet
        islock = self.create_islock(rawtx2)
        # Stop enough MNs so that ChainLocks don't work anymore
        for i in range(3):
            self.stop_node(len(self.nodes) - 1)
            self.nodes.pop(len(self.nodes) - 1)
            self.mninfo.pop(len(self.mninfo) - 1)
        # Send tx1, which will later conflict with the ISLOCK
        self.nodes[0].sendrawtransaction(rawtx1)
        # fast forward 11 minutes, so that the TX is considered safe and included in the next block
        set_mocktime(get_mocktime() + int(60 * 11))
        set_node_times(self.nodes, get_mocktime())
        # Mine the conflicting TX into a block
        good_tip = self.nodes[0].getbestblockhash()
        self.nodes[0].generate(2)
        self.sync_all()
        # Assert that the conflicting tx got mined and the locked TX is not valid
        assert(self.nodes[0].getrawtransaction(rawtx1_txid, True)['confirmations'] > 0)
        assert_raises_jsonrpc(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx2)
        # Send the ISLOCK, which should result in the last 2 blocks to be invalidated, even though the nodes don't know
        # the locked transaction yet
        self.test_node.send_islock(islock)
        sleep(5)
        assert(self.nodes[0].getbestblockhash() == good_tip)
        assert(self.nodes[1].getbestblockhash() == good_tip)
        # Send the actual transaction and mine it
        self.nodes[0].sendrawtransaction(rawtx2)
        self.nodes[0].generate(1)
        self.sync_all()
        assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
        assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
        assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['instantlock'])
        assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['instantlock'])
        assert(self.nodes[0].getbestblockhash() != good_tip)
        assert(self.nodes[1].getbestblockhash() != good_tip)

    def wait_for_chainlock_tip_all_nodes(self):
        """Wait until every node reports its current tip as chainlocked."""
        for node in self.nodes:
            tip = node.getbestblockhash()
            self.wait_for_chainlock(node, tip)

    def wait_for_chainlock_tip(self, node):
        """Wait until `node`'s current tip is chainlocked."""
        tip = node.getbestblockhash()
        self.wait_for_chainlock(node, tip)

    def wait_for_chainlock(self, node, block_hash):
        """Poll `node` (up to 15s) until `block_hash` is confirmed and chainlocked."""
        t = time()
        while time() - t < 15:
            try:
                block = node.getblockheader(block_hash)
                if block["confirmations"] > 0 and block["chainlock"]:
                    return
            except:
                # block might not be on the node yet
                pass
            sleep(0.1)
        raise AssertionError("wait_for_chainlock timed out")

    def create_block(self, node, vtx=[]):
        """Build (but don't submit) a valid block on top of `node`'s template,
        replacing the template's transactions with `vtx` and re-balancing the
        coinbase/masternode payouts for the changed fees.

        NOTE(review): `vtx=[]` is a mutable default argument; harmless here
        because vtx is only read, but `vtx=None` with a fallback would be safer.
        """
        bt = node.getblocktemplate()
        height = bt['height']
        tip_hash = bt['previousblockhash']
        coinbasevalue = bt['coinbasevalue']
        miner_address = node.getnewaddress()
        mn_payee = bt['masternode'][0]['payee']
        # calculate fees that the block template included (we'll have to remove it from the coinbase as we won't
        # include the template's transactions
        bt_fees = 0
        for tx in bt['transactions']:
            bt_fees += tx['fee']
        # Fees contributed by the caller-supplied transactions.
        new_fees = 0
        for tx in vtx:
            in_value = 0
            out_value = 0
            for txin in tx.vin:
                txout = node.gettxout("%064x" % txin.prevout.hash, txin.prevout.n, False)
                in_value += int(txout['value'] * COIN)
            for txout in tx.vout:
                out_value += txout.nValue
            new_fees += in_value - out_value
        # fix fees
        coinbasevalue -= bt_fees
        coinbasevalue += new_fees
        # Split the coinbase between miner and masternode payee.
        mn_amount = get_masternode_payment(height, coinbasevalue)
        miner_amount = coinbasevalue - mn_amount
        outputs = {miner_address: str(Decimal(miner_amount) / COIN)}
        if mn_amount > 0:
            outputs[mn_payee] = str(Decimal(mn_amount) / COIN)
        coinbase = FromHex(CTransaction(), node.createrawtransaction([], outputs))
        coinbase.vin = create_coinbase(height).vin
        # We can't really use this one as it would result in invalid merkle roots for masternode lists
        if len(bt['coinbase_payload']) != 0:
            cbtx = FromHex(CCbTx(version=1), bt['coinbase_payload'])
            coinbase.nVersion = 3
            coinbase.nType = 5 # CbTx
            coinbase.vExtraPayload = cbtx.serialize()
        coinbase.calc_sha256()
        block = create_block(int(tip_hash, 16), coinbase, nTime=bt['curtime'])
        block.vtx += vtx
        # Add quorum commitments from template
        for tx in bt['transactions']:
            tx2 = FromHex(CTransaction(), tx['data'])
            if tx2.nType == 6:
                block.vtx.append(tx2)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        return block

    def create_chainlock(self, height, blockHash):
        """Have the masternode quorum sign a CLSIG for (height, blockHash)."""
        request_id = "%064x" % uint256_from_str(hash256(ser_string(b"clsig") + struct.pack("<I", height)))
        message_hash = "%064x" % blockHash
        for mn in self.mninfo:
            mn.node.quorum('sign', 100, request_id, message_hash)
        # Poll (up to 10s) for the recovered threshold signature.
        recSig = None
        t = time()
        while time() - t < 10:
            try:
                recSig = self.nodes[0].quorum('getrecsig', 100, request_id, message_hash)
                break
            except:
                sleep(0.1)
        assert(recSig is not None)
        clsig = msg_clsig(height, blockHash, hex_str_to_bytes(recSig['sig']))
        return clsig

    def create_islock(self, hextx):
        """Have the masternode quorum sign an ISLOCK for the given raw tx hex."""
        tx = FromHex(CTransaction(), hextx)
        tx.rehash()
        # Request id commits to the set of outpoints being locked.
        request_id_buf = ser_string(b"islock") + ser_compact_size(len(tx.vin))
        inputs = []
        for txin in tx.vin:
            request_id_buf += txin.prevout.serialize()
            inputs.append(txin.prevout)
        request_id = "%064x" % uint256_from_str(hash256(request_id_buf))
        message_hash = "%064x" % tx.sha256
        for mn in self.mninfo:
            mn.node.quorum('sign', 100, request_id, message_hash)
        # Poll (up to 10s) for the recovered threshold signature.
        recSig = None
        t = time()
        while time() - t < 10:
            try:
                recSig = self.nodes[0].quorum('getrecsig', 100, request_id, message_hash)
                break
            except:
                sleep(0.1)
        assert(recSig is not None)
        islock = msg_islock(inputs, tx.sha256, hex_str_to_bytes(recSig['sig']))
        return islock
if __name__ == '__main__':
    # Entry point: run the functional test through the framework's main().
    LLMQ_IS_CL_Conflicts().main()
|
from nltk.corpus import wordnet
import nltk
from nltk.corpus import wordnet
def strip_non_ascii(s):
    """Return *s* with every non-ASCII character (code point >= 128) removed."""
    return "".join(filter(lambda ch: ord(ch) < 128, s))
# Read a '|'-delimited tokens file, extract the narrative column, keep the
# WordNet-recognized tokens, stem them, and write the sorted unique stems
# to stems.txt.
# Fixes: Python-2-only builtins (raw_input, print statements) replaced with
# their Python 3 forms; the Snowball stemmer is now constructed once instead
# of once per token.
filename = input("tokens filename: ")
lines = []
with open(filename, 'r') as f:
    for line in f:
        lines.append(strip_non_ascii(line))

# The narrative is the text after the last '|' separator on each line.
narratives = [x[x.rfind('|')+1:].strip() for x in lines]
tokens_list = [nltk.wordpunct_tokenize(x) for x in narratives]

print("Tokenizing...")
# Keep only tokens WordNet knows (filters out non-English words); deduplicate
# and sort for deterministic output.
tokens = sorted({token for row in tokens_list for token in row
                 if wordnet.synsets(token)})

print("Stemming...")
stemmer = nltk.stem.snowball.EnglishStemmer()  # build once, not per token
stems = sorted({stemmer.stem(x) for x in tokens})

print("Writing...")
with open("stems.txt", 'w') as f:
    for stem in stems:
        f.write(stem + "\n")
|
"""tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
# URL routes for the project. The two separate list assignments were merged,
# and the deprecated url(r'^', include(...)) form (django.conf.urls.url was
# removed in Django 4.0) is replaced by the equivalent path('', include(...));
# both mount the gmoji URLconf at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('gmoji.urls')),
]
|
###################################################################################################
#ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network
#Paper-Link: https://arxiv.org/pdf/1811.11431.pdf
###################################################################################################
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from model.ESPNet_v2.ModelX4 import EESPNet, EESP
from model.ESPNet_v2.cnn_utilsX4 import *
from fvcore.nn.flop_count import flop_count #https://github.com/facebookresearch/fvcore
from tools.flops_counter.ptflops import get_model_complexity_info
from thop import profile #https://github.com/Lyken17/pytorch-OpCounter
__all__ = ["EESPNet_SegX4"]
class EESPNet_SegX4(nn.Module):
    """ESPNetv2 segmentation head on top of an EESPNet classification backbone.

    Args:
        classes: number of segmentation classes.
        s: width scale factor for the EESPNet backbone.
        pretrained: optional path to pretrained classification weights.
        gpus: number of GPUs; >= 1 wraps the backbone in DataParallel.
            NOTE(review): the code unconditionally reads
            ``classificationNet.module`` below, so gpus must be >= 1 —
            confirm before calling with gpus=0.
    """
    def __init__(self, classes=19, s=2, pretrained=None, gpus=1):
        super().__init__()
        classificationNet = EESPNet(classes=1000, s=s)
        if gpus >= 1:
            classificationNet = nn.DataParallel(classificationNet)
        # Load pretrained classification weights only when the file exists.
        # (Fix: the original printed a "file does not exist" warning but then
        # still called torch.load(pretrained), crashing immediately after.)
        if pretrained:
            if not os.path.isfile(pretrained):
                print('Weight file does not exist. Training without pre-trained weights')
            else:
                print('Model initialized with pretrained weights')
                classificationNet.load_state_dict(torch.load(pretrained))
        self.net = classificationNet.module
        del classificationNet
        # Drop the classifier and the deepest stage; only levels 1-4 are used.
        del self.net.classifier
        del self.net.level5
        del self.net.level5_0
        # Smaller backbones get less dropout.
        if s <= 0.5:
            p = 0.1
        else:
            p = 0.2
        # Project level-4 features down to the level-3 channel count.
        self.proj_L4_C = CBR(self.net.level4[-1].module_act.num_parameters,
                             self.net.level3[-1].module_act.num_parameters, 1, 1)
        pspSize = 2 * self.net.level3[-1].module_act.num_parameters
        self.pspMod = nn.Sequential(EESP(pspSize, pspSize // 2, stride=1, k=4, r_lim=7),
                                    PSPModule(pspSize // 2, pspSize // 2))
        self.project_l3 = nn.Sequential(nn.Dropout2d(p=p), C(pspSize // 2, classes, 1, 1))
        self.act_l3 = BR(classes)
        self.project_l2 = CBR(self.net.level2_0.act.num_parameters + classes, classes, 1, 1)
        self.project_l1 = nn.Sequential(nn.Dropout2d(p=p),
                                        C(self.net.level1.act.num_parameters + classes, classes, 1, 1))

    def hierarchicalUpsample(self, x, factor=3):
        """Bilinearly upsample ``x`` by a factor of 2**factor."""
        for _ in range(factor):
            x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        return x

    def forward(self, input):
        """Fuse the four backbone stages top-down and return class logits
        upsampled 2x from the level-1 merge (presumably full input
        resolution if level-1 stride is 2 — confirm against the backbone)."""
        out_l1, out_l2, out_l3, out_l4 = self.net(input, seg=True)
        # Fuse level-4 into level-3 at level-3 resolution, then PSP-pool.
        out_l4_proj = self.proj_L4_C(out_l4)
        up_l4_to_l3 = F.interpolate(out_l4_proj, size=out_l3.size()[2:], mode='bilinear', align_corners=True)
        merged_l3_upl4 = self.pspMod(torch.cat([out_l3, up_l4_to_l3], 1))
        proj_merge_l3_bef_act = self.project_l3(merged_l3_upl4)
        proj_merge_l3 = self.act_l3(proj_merge_l3_bef_act)
        # Cascade class maps up through the level-2 and level-1 features.
        out_up_l3 = F.interpolate(proj_merge_l3, scale_factor=2, mode='bilinear', align_corners=True)
        merge_l2 = self.project_l2(torch.cat([out_l2, out_up_l3], 1))
        out_up_l2 = F.interpolate(merge_l2, scale_factor=2, mode='bilinear', align_corners=True)
        merge_l1 = self.project_l1(torch.cat([out_l1, out_up_l2], 1))
        output = F.interpolate(merge_l1, scale_factor=2, mode='bilinear', align_corners=True)
        return output
if __name__ == '__main__':
    # Smoke test: build the model and report size/complexity three ways.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = EESPNet_SegX4(classes=11, s=2).to(device)
    # Layer-by-layer summary for a 3x352x480 input (CamVid-sized).
    summary(model, (3, 352, 480))
    # Complexity via ptflops; values are printed scaled to GMac / M params.
    flops_count, params_count = get_model_complexity_info(model, (3, 352, 480),
                                                          as_strings=False,
                                                          print_per_layer_stat=True)
    print(flops_count / 1000000000, 'GMac', params_count / 1000000, params_count / 1024 / 1024 * 4, 'MB')
    # Cross-check with thop on a batch of 2; dividing by 2e9 presumably
    # yields per-sample GMacs — confirm against thop's counting convention.
    x = torch.randn(2, 3, 352, 480).to(device)
    input = x  # NOTE(review): shadows the builtin input()
    macs, params = profile(model, inputs=(input,))
    print(macs / 2000000000, 'GMac', params / 1000000, params / 1024 / 1024 * 4, 'MB')
'''
/home/ethan/anaconda3/envs/py36_cuda101/bin/python /home/ethan/codes/Efficient-Segmentation-Networks/model/ESPNet_v2/SegmentationModelX4.py
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 32, 176, 240] 864
BatchNorm2d-2 [-1, 32, 176, 240] 64
PReLU-3 [-1, 32, 176, 240] 32
CBR-4 [-1, 32, 176, 240] 0
AvgPool2d-5 [-1, 32, 88, 120] 0
Conv2d-6 [-1, 32, 177, 241] 128
BatchNorm2d-7 [-1, 32, 177, 241] 64
PReLU-8 [-1, 32, 177, 241] 32
Conv2d-9 [-1, 16, 177, 241] 128
BatchNorm2d-10 [-1, 16, 177, 241] 32
PReLU-11 [-1, 16, 177, 241] 16
CBR1-12 [-1, 16, 177, 241] 0
Conv2d-13 [-1, 16, 88, 120] 144
CDilated1-14 [-1, 16, 88, 120] 0
Conv2d-15 [-1, 16, 88, 120] 144
CDilated1-16 [-1, 16, 88, 120] 0
Conv2d-17 [-1, 16, 88, 120] 144
CDilated1-18 [-1, 16, 88, 120] 0
Conv2d-19 [-1, 16, 88, 120] 144
CDilated1-20 [-1, 16, 88, 120] 0
BatchNorm2d-21 [-1, 64, 88, 120] 128
PReLU-22 [-1, 64, 88, 120] 64
BR-23 [-1, 64, 88, 120] 0
Conv2d-24 [-1, 96, 88, 120] 1,536
BatchNorm2d-25 [-1, 96, 88, 120] 192
CB-26 [-1, 96, 88, 120] 0
EESP1-27 [-1, 96, 88, 120] 0
Conv2d-28 [-1, 3, 88, 120] 81
BatchNorm2d-29 [-1, 3, 88, 120] 6
PReLU-30 [-1, 3, 88, 120] 3
CBR-31 [-1, 3, 88, 120] 0
Conv2d-32 [-1, 128, 88, 120] 384
BatchNorm2d-33 [-1, 128, 88, 120] 256
CB-34 [-1, 128, 88, 120] 0
PReLU-35 [-1, 128, 88, 120] 128
DownSampler-36 [-1, 128, 88, 120] 0
AvgPool2d-37 [-1, 128, 44, 60] 0
Conv2d-38 [-1, 128, 89, 121] 512
BatchNorm2d-39 [-1, 128, 89, 121] 256
PReLU-40 [-1, 128, 89, 121] 128
Conv2d-41 [-1, 20, 89, 121] 640
BatchNorm2d-42 [-1, 20, 89, 121] 40
PReLU-43 [-1, 20, 89, 121] 20
CBR1-44 [-1, 20, 89, 121] 0
Conv2d-45 [-1, 20, 44, 60] 180
CDilated1-46 [-1, 20, 44, 60] 0
Conv2d-47 [-1, 20, 44, 60] 180
CDilated1-48 [-1, 20, 44, 60] 0
Conv2d-49 [-1, 20, 44, 60] 180
CDilated1-50 [-1, 20, 44, 60] 0
Conv2d-51 [-1, 20, 44, 60] 180
CDilated1-52 [-1, 20, 44, 60] 0
BatchNorm2d-53 [-1, 80, 44, 60] 160
PReLU-54 [-1, 80, 44, 60] 80
BR-55 [-1, 80, 44, 60] 0
Conv2d-56 [-1, 128, 44, 60] 2,560
BatchNorm2d-57 [-1, 128, 44, 60] 256
CB-58 [-1, 128, 44, 60] 0
EESP1-59 [-1, 128, 44, 60] 0
Conv2d-60 [-1, 3, 44, 60] 81
BatchNorm2d-61 [-1, 3, 44, 60] 6
PReLU-62 [-1, 3, 44, 60] 3
CBR-63 [-1, 3, 44, 60] 0
Conv2d-64 [-1, 256, 44, 60] 768
BatchNorm2d-65 [-1, 256, 44, 60] 512
CB-66 [-1, 256, 44, 60] 0
PReLU-67 [-1, 256, 44, 60] 256
DownSampler-68 [-1, 256, 44, 60] 0
Conv2d-69 [-1, 64, 44, 60] 4,096
BatchNorm2d-70 [-1, 64, 44, 60] 128
PReLU-71 [-1, 64, 44, 60] 64
CBR-72 [-1, 64, 44, 60] 0
Conv2d-73 [-1, 64, 44, 60] 576
CDilated-74 [-1, 64, 44, 60] 0
Conv2d-75 [-1, 64, 44, 60] 576
CDilated-76 [-1, 64, 44, 60] 0
Conv2d-77 [-1, 64, 44, 60] 576
CDilated-78 [-1, 64, 44, 60] 0
Conv2d-79 [-1, 64, 44, 60] 576
CDilated-80 [-1, 64, 44, 60] 0
BatchNorm2d-81 [-1, 256, 44, 60] 512
PReLU-82 [-1, 256, 44, 60] 256
BR-83 [-1, 256, 44, 60] 0
Conv2d-84 [-1, 256, 44, 60] 16,384
BatchNorm2d-85 [-1, 256, 44, 60] 512
CB-86 [-1, 256, 44, 60] 0
PReLU-87 [-1, 256, 44, 60] 256
EESP-88 [-1, 256, 44, 60] 0
Conv2d-89 [-1, 64, 44, 60] 4,096
BatchNorm2d-90 [-1, 64, 44, 60] 128
PReLU-91 [-1, 64, 44, 60] 64
CBR-92 [-1, 64, 44, 60] 0
Conv2d-93 [-1, 64, 44, 60] 576
CDilated-94 [-1, 64, 44, 60] 0
Conv2d-95 [-1, 64, 44, 60] 576
CDilated-96 [-1, 64, 44, 60] 0
Conv2d-97 [-1, 64, 44, 60] 576
CDilated-98 [-1, 64, 44, 60] 0
Conv2d-99 [-1, 64, 44, 60] 576
CDilated-100 [-1, 64, 44, 60] 0
BatchNorm2d-101 [-1, 256, 44, 60] 512
PReLU-102 [-1, 256, 44, 60] 256
BR-103 [-1, 256, 44, 60] 0
Conv2d-104 [-1, 256, 44, 60] 16,384
BatchNorm2d-105 [-1, 256, 44, 60] 512
CB-106 [-1, 256, 44, 60] 0
PReLU-107 [-1, 256, 44, 60] 256
EESP-108 [-1, 256, 44, 60] 0
Conv2d-109 [-1, 64, 44, 60] 4,096
BatchNorm2d-110 [-1, 64, 44, 60] 128
PReLU-111 [-1, 64, 44, 60] 64
CBR-112 [-1, 64, 44, 60] 0
Conv2d-113 [-1, 64, 44, 60] 576
CDilated-114 [-1, 64, 44, 60] 0
Conv2d-115 [-1, 64, 44, 60] 576
CDilated-116 [-1, 64, 44, 60] 0
Conv2d-117 [-1, 64, 44, 60] 576
CDilated-118 [-1, 64, 44, 60] 0
Conv2d-119 [-1, 64, 44, 60] 576
CDilated-120 [-1, 64, 44, 60] 0
BatchNorm2d-121 [-1, 256, 44, 60] 512
PReLU-122 [-1, 256, 44, 60] 256
BR-123 [-1, 256, 44, 60] 0
Conv2d-124 [-1, 256, 44, 60] 16,384
BatchNorm2d-125 [-1, 256, 44, 60] 512
CB-126 [-1, 256, 44, 60] 0
PReLU-127 [-1, 256, 44, 60] 256
EESP-128 [-1, 256, 44, 60] 0
AvgPool2d-129 [-1, 256, 22, 30] 0
Conv2d-130 [-1, 256, 45, 61] 1,024
BatchNorm2d-131 [-1, 256, 45, 61] 512
PReLU-132 [-1, 256, 45, 61] 256
Conv2d-133 [-1, 44, 45, 61] 2,816
BatchNorm2d-134 [-1, 44, 45, 61] 88
PReLU-135 [-1, 44, 45, 61] 44
CBR1-136 [-1, 44, 45, 61] 0
Conv2d-137 [-1, 44, 22, 30] 396
CDilated1-138 [-1, 44, 22, 30] 0
Conv2d-139 [-1, 44, 22, 30] 396
CDilated1-140 [-1, 44, 22, 30] 0
Conv2d-141 [-1, 44, 22, 30] 396
CDilated1-142 [-1, 44, 22, 30] 0
Conv2d-143 [-1, 44, 22, 30] 396
CDilated1-144 [-1, 44, 22, 30] 0
BatchNorm2d-145 [-1, 176, 22, 30] 352
PReLU-146 [-1, 176, 22, 30] 176
BR-147 [-1, 176, 22, 30] 0
Conv2d-148 [-1, 256, 22, 30] 11,264
BatchNorm2d-149 [-1, 256, 22, 30] 512
CB-150 [-1, 256, 22, 30] 0
EESP1-151 [-1, 256, 22, 30] 0
Conv2d-152 [-1, 3, 22, 30] 81
BatchNorm2d-153 [-1, 3, 22, 30] 6
PReLU-154 [-1, 3, 22, 30] 3
CBR-155 [-1, 3, 22, 30] 0
Conv2d-156 [-1, 512, 22, 30] 1,536
BatchNorm2d-157 [-1, 512, 22, 30] 1,024
CB-158 [-1, 512, 22, 30] 0
PReLU-159 [-1, 512, 22, 30] 512
DownSampler-160 [-1, 512, 22, 30] 0
Conv2d-161 [-1, 128, 22, 30] 16,384
BatchNorm2d-162 [-1, 128, 22, 30] 256
PReLU-163 [-1, 128, 22, 30] 128
CBR-164 [-1, 128, 22, 30] 0
Conv2d-165 [-1, 128, 22, 30] 1,152
CDilated-166 [-1, 128, 22, 30] 0
Conv2d-167 [-1, 128, 22, 30] 1,152
CDilated-168 [-1, 128, 22, 30] 0
Conv2d-169 [-1, 128, 22, 30] 1,152
CDilated-170 [-1, 128, 22, 30] 0
Conv2d-171 [-1, 128, 22, 30] 1,152
CDilated-172 [-1, 128, 22, 30] 0
BatchNorm2d-173 [-1, 512, 22, 30] 1,024
PReLU-174 [-1, 512, 22, 30] 512
BR-175 [-1, 512, 22, 30] 0
Conv2d-176 [-1, 512, 22, 30] 65,536
BatchNorm2d-177 [-1, 512, 22, 30] 1,024
CB-178 [-1, 512, 22, 30] 0
PReLU-179 [-1, 512, 22, 30] 512
EESP-180 [-1, 512, 22, 30] 0
Conv2d-181 [-1, 128, 22, 30] 16,384
BatchNorm2d-182 [-1, 128, 22, 30] 256
PReLU-183 [-1, 128, 22, 30] 128
CBR-184 [-1, 128, 22, 30] 0
Conv2d-185 [-1, 128, 22, 30] 1,152
CDilated-186 [-1, 128, 22, 30] 0
Conv2d-187 [-1, 128, 22, 30] 1,152
CDilated-188 [-1, 128, 22, 30] 0
Conv2d-189 [-1, 128, 22, 30] 1,152
CDilated-190 [-1, 128, 22, 30] 0
Conv2d-191 [-1, 128, 22, 30] 1,152
CDilated-192 [-1, 128, 22, 30] 0
BatchNorm2d-193 [-1, 512, 22, 30] 1,024
PReLU-194 [-1, 512, 22, 30] 512
BR-195 [-1, 512, 22, 30] 0
Conv2d-196 [-1, 512, 22, 30] 65,536
BatchNorm2d-197 [-1, 512, 22, 30] 1,024
CB-198 [-1, 512, 22, 30] 0
PReLU-199 [-1, 512, 22, 30] 512
EESP-200 [-1, 512, 22, 30] 0
Conv2d-201 [-1, 128, 22, 30] 16,384
BatchNorm2d-202 [-1, 128, 22, 30] 256
PReLU-203 [-1, 128, 22, 30] 128
CBR-204 [-1, 128, 22, 30] 0
Conv2d-205 [-1, 128, 22, 30] 1,152
CDilated-206 [-1, 128, 22, 30] 0
Conv2d-207 [-1, 128, 22, 30] 1,152
CDilated-208 [-1, 128, 22, 30] 0
Conv2d-209 [-1, 128, 22, 30] 1,152
CDilated-210 [-1, 128, 22, 30] 0
Conv2d-211 [-1, 128, 22, 30] 1,152
CDilated-212 [-1, 128, 22, 30] 0
BatchNorm2d-213 [-1, 512, 22, 30] 1,024
PReLU-214 [-1, 512, 22, 30] 512
BR-215 [-1, 512, 22, 30] 0
Conv2d-216 [-1, 512, 22, 30] 65,536
BatchNorm2d-217 [-1, 512, 22, 30] 1,024
CB-218 [-1, 512, 22, 30] 0
PReLU-219 [-1, 512, 22, 30] 512
EESP-220 [-1, 512, 22, 30] 0
Conv2d-221 [-1, 128, 22, 30] 16,384
BatchNorm2d-222 [-1, 128, 22, 30] 256
PReLU-223 [-1, 128, 22, 30] 128
CBR-224 [-1, 128, 22, 30] 0
Conv2d-225 [-1, 128, 22, 30] 1,152
CDilated-226 [-1, 128, 22, 30] 0
Conv2d-227 [-1, 128, 22, 30] 1,152
CDilated-228 [-1, 128, 22, 30] 0
Conv2d-229 [-1, 128, 22, 30] 1,152
CDilated-230 [-1, 128, 22, 30] 0
Conv2d-231 [-1, 128, 22, 30] 1,152
CDilated-232 [-1, 128, 22, 30] 0
BatchNorm2d-233 [-1, 512, 22, 30] 1,024
PReLU-234 [-1, 512, 22, 30] 512
BR-235 [-1, 512, 22, 30] 0
Conv2d-236 [-1, 512, 22, 30] 65,536
BatchNorm2d-237 [-1, 512, 22, 30] 1,024
CB-238 [-1, 512, 22, 30] 0
PReLU-239 [-1, 512, 22, 30] 512
EESP-240 [-1, 512, 22, 30] 0
Conv2d-241 [-1, 128, 22, 30] 16,384
BatchNorm2d-242 [-1, 128, 22, 30] 256
PReLU-243 [-1, 128, 22, 30] 128
CBR-244 [-1, 128, 22, 30] 0
Conv2d-245 [-1, 128, 22, 30] 1,152
CDilated-246 [-1, 128, 22, 30] 0
Conv2d-247 [-1, 128, 22, 30] 1,152
CDilated-248 [-1, 128, 22, 30] 0
Conv2d-249 [-1, 128, 22, 30] 1,152
CDilated-250 [-1, 128, 22, 30] 0
Conv2d-251 [-1, 128, 22, 30] 1,152
CDilated-252 [-1, 128, 22, 30] 0
BatchNorm2d-253 [-1, 512, 22, 30] 1,024
PReLU-254 [-1, 512, 22, 30] 512
BR-255 [-1, 512, 22, 30] 0
Conv2d-256 [-1, 512, 22, 30] 65,536
BatchNorm2d-257 [-1, 512, 22, 30] 1,024
CB-258 [-1, 512, 22, 30] 0
PReLU-259 [-1, 512, 22, 30] 512
EESP-260 [-1, 512, 22, 30] 0
Conv2d-261 [-1, 128, 22, 30] 16,384
BatchNorm2d-262 [-1, 128, 22, 30] 256
PReLU-263 [-1, 128, 22, 30] 128
CBR-264 [-1, 128, 22, 30] 0
Conv2d-265 [-1, 128, 22, 30] 1,152
CDilated-266 [-1, 128, 22, 30] 0
Conv2d-267 [-1, 128, 22, 30] 1,152
CDilated-268 [-1, 128, 22, 30] 0
Conv2d-269 [-1, 128, 22, 30] 1,152
CDilated-270 [-1, 128, 22, 30] 0
Conv2d-271 [-1, 128, 22, 30] 1,152
CDilated-272 [-1, 128, 22, 30] 0
BatchNorm2d-273 [-1, 512, 22, 30] 1,024
PReLU-274 [-1, 512, 22, 30] 512
BR-275 [-1, 512, 22, 30] 0
Conv2d-276 [-1, 512, 22, 30] 65,536
BatchNorm2d-277 [-1, 512, 22, 30] 1,024
CB-278 [-1, 512, 22, 30] 0
PReLU-279 [-1, 512, 22, 30] 512
EESP-280 [-1, 512, 22, 30] 0
Conv2d-281 [-1, 128, 22, 30] 16,384
BatchNorm2d-282 [-1, 128, 22, 30] 256
PReLU-283 [-1, 128, 22, 30] 128
CBR-284 [-1, 128, 22, 30] 0
Conv2d-285 [-1, 128, 22, 30] 1,152
CDilated-286 [-1, 128, 22, 30] 0
Conv2d-287 [-1, 128, 22, 30] 1,152
CDilated-288 [-1, 128, 22, 30] 0
Conv2d-289 [-1, 128, 22, 30] 1,152
CDilated-290 [-1, 128, 22, 30] 0
Conv2d-291 [-1, 128, 22, 30] 1,152
CDilated-292 [-1, 128, 22, 30] 0
BatchNorm2d-293 [-1, 512, 22, 30] 1,024
PReLU-294 [-1, 512, 22, 30] 512
BR-295 [-1, 512, 22, 30] 0
Conv2d-296 [-1, 512, 22, 30] 65,536
BatchNorm2d-297 [-1, 512, 22, 30] 1,024
CB-298 [-1, 512, 22, 30] 0
PReLU-299 [-1, 512, 22, 30] 512
EESP-300 [-1, 512, 22, 30] 0
EESPNet-301 [[-1, 32, 176, 240], [-1, 128, 88, 120], [-1, 256, 44, 60], [-1, 512, 22, 30]] 0
Conv2d-302 [-1, 256, 22, 30] 131,072
BatchNorm2d-303 [-1, 256, 22, 30] 512
PReLU-304 [-1, 256, 22, 30] 256
CBR-305 [-1, 256, 22, 30] 0
Conv2d-306 [-1, 64, 44, 60] 8,192
BatchNorm2d-307 [-1, 64, 44, 60] 128
PReLU-308 [-1, 64, 44, 60] 64
CBR-309 [-1, 64, 44, 60] 0
Conv2d-310 [-1, 64, 44, 60] 576
CDilated-311 [-1, 64, 44, 60] 0
Conv2d-312 [-1, 64, 44, 60] 576
CDilated-313 [-1, 64, 44, 60] 0
Conv2d-314 [-1, 64, 44, 60] 576
CDilated-315 [-1, 64, 44, 60] 0
Conv2d-316 [-1, 64, 44, 60] 576
CDilated-317 [-1, 64, 44, 60] 0
BatchNorm2d-318 [-1, 256, 44, 60] 512
PReLU-319 [-1, 256, 44, 60] 256
BR-320 [-1, 256, 44, 60] 0
Conv2d-321 [-1, 256, 44, 60] 16,384
BatchNorm2d-322 [-1, 256, 44, 60] 512
CB-323 [-1, 256, 44, 60] 0
PReLU-324 [-1, 256, 44, 60] 256
EESP-325 [-1, 256, 44, 60] 0
Conv2d-326 [-1, 256, 22, 30] 2,304
C-327 [-1, 256, 22, 30] 0
Conv2d-328 [-1, 256, 11, 15] 2,304
C-329 [-1, 256, 11, 15] 0
Conv2d-330 [-1, 256, 6, 8] 2,304
C-331 [-1, 256, 6, 8] 0
Conv2d-332 [-1, 256, 3, 4] 2,304
C-333 [-1, 256, 3, 4] 0
Conv2d-334 [-1, 256, 44, 60] 327,680
BatchNorm2d-335 [-1, 256, 44, 60] 512
PReLU-336 [-1, 256, 44, 60] 256
CBR-337 [-1, 256, 44, 60] 0
PSPModule-338 [-1, 256, 44, 60] 0
Dropout2d-339 [-1, 256, 44, 60] 0
Conv2d-340 [-1, 11, 44, 60] 2,816
C-341 [-1, 11, 44, 60] 0
BatchNorm2d-342 [-1, 11, 44, 60] 22
PReLU-343 [-1, 11, 44, 60] 11
BR-344 [-1, 11, 44, 60] 0
Conv2d-345 [-1, 11, 88, 120] 1,529
BatchNorm2d-346 [-1, 11, 88, 120] 22
PReLU-347 [-1, 11, 88, 120] 11
CBR-348 [-1, 11, 88, 120] 0
Dropout2d-349 [-1, 43, 176, 240] 0
Conv2d-350 [-1, 11, 176, 240] 473
C-351 [-1, 11, 176, 240] 0
================================================================
Total params: 1,239,922
Trainable params: 1,239,922
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 1.93
Forward/backward pass size (MB): 49542447760184.38
Params size (MB): 4.73
Estimated Total Size (MB): 49542447760191.05
----------------------------------------------------------------
EESPNet_SegX4(
1.838 GMac, 100.000% MACs,
(net): EESPNet(
0.762 GMac, 41.465% MACs,
(level1): CBR(
0.041 GMac, 2.207% MACs,
(conv): Conv2d(0.036 GMac, 1.986% MACs, 3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.003 GMac, 0.147% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.074% MACs, num_parameters=32)
)
(level2_0): DownSampler(
0.054 GMac, 2.929% MACs,
(eesp): EESP1(
0.043 GMac, 2.363% MACs,
(proj_1x1): CBR1(
0.017 GMac, 0.929% MACs,
(conv0): Conv2d(0.005 GMac, 0.297% MACs, 32, 32, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), groups=32, bias=False)
(bn0): BatchNorm2d(0.003 GMac, 0.149% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act0): PReLU(0.001 GMac, 0.074% MACs, num_parameters=32)
(conv): Conv2d(0.005 GMac, 0.297% MACs, 32, 16, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=16)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated1(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), groups=16, bias=False)
)
(1): CDilated1(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), dilation=(2, 2), groups=16, bias=False)
)
(2): CDilated1(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), dilation=(3, 3), groups=16, bias=False)
)
(3): CDilated1(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(3, 3), dilation=(4, 4), groups=16, bias=False)
)
)
(conv_1x1_exp): CB(
0.018 GMac, 0.993% MACs,
(conv): Conv2d(0.016 GMac, 0.883% MACs, 64, 96, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.002 GMac, 0.110% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=64)
)
(module_act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=96)
)
(avg): AvgPool2d(0.001 GMac, 0.074% MACs, kernel_size=3, stride=2, padding=1)
(inp_reinf): Sequential(
0.008 GMac, 0.419% MACs,
(0): CBR(
0.001 GMac, 0.052% MACs,
(conv): Conv2d(0.001 GMac, 0.047% MACs, 3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.002% MACs, num_parameters=3)
)
(1): CB(
0.007 GMac, 0.368% MACs,
(conv): Conv2d(0.004 GMac, 0.221% MACs, 3, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.003 GMac, 0.147% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(act): PReLU(0.001 GMac, 0.074% MACs, num_parameters=128)
)
(level3_0): DownSampler(
0.033 GMac, 1.785% MACs,
(eesp): EESP1(
0.027 GMac, 1.478% MACs,
(proj_1x1): CBR1(
0.017 GMac, 0.935% MACs,
(conv0): Conv2d(0.006 GMac, 0.300% MACs, 128, 128, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(bn0): BatchNorm2d(0.003 GMac, 0.150% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act0): PReLU(0.001 GMac, 0.075% MACs, num_parameters=128)
(conv): Conv2d(0.007 GMac, 0.375% MACs, 128, 20, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.023% MACs, 20, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.012% MACs, num_parameters=20)
)
(spp_dw): ModuleList(
0.002 GMac, 0.103% MACs,
(0): CDilated1(
0.0 GMac, 0.026% MACs,
(conv): Conv2d(0.0 GMac, 0.026% MACs, 20, 20, kernel_size=(3, 3), stride=(2, 2), groups=20, bias=False)
)
(1): CDilated1(
0.0 GMac, 0.026% MACs,
(conv): Conv2d(0.0 GMac, 0.026% MACs, 20, 20, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), dilation=(2, 2), groups=20, bias=False)
)
(2): CDilated1(
0.0 GMac, 0.026% MACs,
(conv): Conv2d(0.0 GMac, 0.026% MACs, 20, 20, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), dilation=(3, 3), groups=20, bias=False)
)
(3): CDilated1(
0.0 GMac, 0.026% MACs,
(conv): Conv2d(0.0 GMac, 0.026% MACs, 20, 20, kernel_size=(3, 3), stride=(2, 2), padding=(3, 3), dilation=(4, 4), groups=20, bias=False)
)
)
(conv_1x1_exp): CB(
0.007 GMac, 0.405% MACs,
(conv): Conv2d(0.007 GMac, 0.368% MACs, 80, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.034% MACs,
(bn): BatchNorm2d(0.0 GMac, 0.023% MACs, 80, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.011% MACs, num_parameters=80)
)
(module_act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=128)
)
(avg): AvgPool2d(0.001 GMac, 0.074% MACs, kernel_size=3, stride=2, padding=1)
(inp_reinf): Sequential(
0.004 GMac, 0.197% MACs,
(0): CBR(
0.0 GMac, 0.013% MACs,
(conv): Conv2d(0.0 GMac, 0.012% MACs, 3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=3)
)
(1): CB(
0.003 GMac, 0.184% MACs,
(conv): Conv2d(0.002 GMac, 0.110% MACs, 3, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(level3): ModuleList(
0.194 GMac, 10.564% MACs,
(0): EESP(
0.065 GMac, 3.521% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.616% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 256, 64, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=64)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(1): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=64, bias=False)
)
(2): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=64, bias=False)
)
(3): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), groups=64, bias=False)
)
)
(conv_1x1_exp): CB(
0.045 GMac, 2.427% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 256, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(module_act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(1): EESP(
0.065 GMac, 3.521% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.616% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 256, 64, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=64)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(1): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=64, bias=False)
)
(2): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=64, bias=False)
)
(3): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), groups=64, bias=False)
)
)
(conv_1x1_exp): CB(
0.045 GMac, 2.427% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 256, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(module_act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(2): EESP(
0.065 GMac, 3.521% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.616% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 256, 64, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=64)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(1): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=64, bias=False)
)
(2): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=64, bias=False)
)
(3): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), groups=64, bias=False)
)
)
(conv_1x1_exp): CB(
0.045 GMac, 2.427% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 256, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(module_act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
)
(level4_0): DownSampler(
0.025 GMac, 1.357% MACs,
(eesp): EESP1(
0.022 GMac, 1.207% MACs,
(proj_1x1): CBR1(
0.013 GMac, 0.708% MACs,
(conv0): Conv2d(0.003 GMac, 0.153% MACs, 256, 256, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
(bn0): BatchNorm2d(0.001 GMac, 0.076% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act0): PReLU(0.001 GMac, 0.038% MACs, num_parameters=256)
(conv): Conv2d(0.008 GMac, 0.421% MACs, 256, 44, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.013% MACs, 44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.007% MACs, num_parameters=44)
)
(spp_dw): ModuleList(
0.001 GMac, 0.057% MACs,
(0): CDilated1(
0.0 GMac, 0.014% MACs,
(conv): Conv2d(0.0 GMac, 0.014% MACs, 44, 44, kernel_size=(3, 3), stride=(2, 2), groups=44, bias=False)
)
(1): CDilated1(
0.0 GMac, 0.014% MACs,
(conv): Conv2d(0.0 GMac, 0.014% MACs, 44, 44, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), dilation=(2, 2), groups=44, bias=False)
)
(2): CDilated1(
0.0 GMac, 0.014% MACs,
(conv): Conv2d(0.0 GMac, 0.014% MACs, 44, 44, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), dilation=(3, 3), groups=44, bias=False)
)
(3): CDilated1(
0.0 GMac, 0.014% MACs,
(conv): Conv2d(0.0 GMac, 0.014% MACs, 44, 44, kernel_size=(3, 3), stride=(2, 2), padding=(3, 3), dilation=(4, 4), groups=44, bias=False)
)
)
(conv_1x1_exp): CB(
0.008 GMac, 0.423% MACs,
(conv): Conv2d(0.007 GMac, 0.405% MACs, 176, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.0 GMac, 0.019% MACs,
(bn): BatchNorm2d(0.0 GMac, 0.013% MACs, 176, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.006% MACs, num_parameters=176)
)
(module_act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=256)
)
(avg): AvgPool2d(0.001 GMac, 0.037% MACs, kernel_size=3, stride=2, padding=1)
(inp_reinf): Sequential(
0.002 GMac, 0.095% MACs,
(0): CBR(
0.0 GMac, 0.003% MACs,
(conv): Conv2d(0.0 GMac, 0.003% MACs, 3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=3)
)
(1): CB(
0.002 GMac, 0.092% MACs,
(conv): Conv2d(0.001 GMac, 0.055% MACs, 3, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(level4): ModuleList(
0.416 GMac, 22.623% MACs,
(0): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(1): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(2): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(3): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(4): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(5): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(6): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
)
)
(proj_L4_C): CBR(
0.087 GMac, 4.735% MACs,
(conv): Conv2d(0.087 GMac, 4.707% MACs, 512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=256)
)
(pspMod): Sequential(
0.945 GMac, 51.406% MACs,
(0): EESP(
0.076 GMac, 4.110% MACs,
(proj_1x1): CBR(
0.022 GMac, 1.204% MACs,
(conv): Conv2d(0.022 GMac, 1.177% MACs, 512, 64, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=64)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(1): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(2): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=64, bias=False)
)
(3): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=64, bias=False)
)
)
(conv_1x1_exp): CB(
0.045 GMac, 2.427% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 256, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(module_act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(1): PSPModule(
0.869 GMac, 47.296% MACs,
(stages): ModuleList(
0.002 GMac, 0.111% MACs,
(0): C(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
)
(1): C(
0.0 GMac, 0.021% MACs,
(conv): Conv2d(0.0 GMac, 0.021% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
)
(2): C(
0.0 GMac, 0.006% MACs,
(conv): Conv2d(0.0 GMac, 0.006% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
)
(3): C(
0.0 GMac, 0.002% MACs,
(conv): Conv2d(0.0 GMac, 0.002% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
)
)
(project): CBR(
0.867 GMac, 47.185% MACs,
(conv): Conv2d(0.865 GMac, 47.075% MACs, 1280, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
)
)
(project_l3): Sequential(
0.007 GMac, 0.405% MACs,
(0): Dropout2d(0.0 GMac, 0.000% MACs, p=0.2, inplace=False)
(1): C(
0.007 GMac, 0.405% MACs,
(conv): Conv2d(0.007 GMac, 0.405% MACs, 256, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
)
(act_l3): BR(
0.0 GMac, 0.005% MACs,
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.002% MACs, num_parameters=11)
)
(project_l2): CBR(
0.016 GMac, 0.898% MACs,
(conv): Conv2d(0.016 GMac, 0.879% MACs, 139, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.013% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.006% MACs, num_parameters=11)
)
(project_l1): Sequential(
0.02 GMac, 1.087% MACs,
(0): Dropout2d(0.0 GMac, 0.000% MACs, p=0.2, inplace=False)
(1): C(
0.02 GMac, 1.087% MACs,
(conv): Conv2d(0.02 GMac, 1.087% MACs, 43, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
)
)
1.837661544 GMac 1.240402 4.731758117675781 MB
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.
[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.activation.PReLU'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CBR'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CBR1'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CDilated1'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.ModuleList'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CB'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.BR'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.ModelX4.EESP1'>. Treat it as zero Macs and zero Params.
[INFO] Register count_avgpool() for <class 'torch.nn.modules.pooling.AvgPool2d'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.Sequential'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.ModelX4.DownSampler'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CDilated'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.ModelX4.EESP'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.ModelX4.EESPNet'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.C'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.PSPModule'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'torch.nn.modules.dropout.Dropout2d'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.EESPNet_SegX4'>. Treat it as zero Macs and zero Params.
1.813505596 GMac 1.227267 4.681652069091797 MB
Process finished with exit code 0
'''
|
from typing import Any, Dict, Tuple
from ee.clickhouse.queries.event_query import ClickhouseEventQuery
from ee.clickhouse.queries.trends.util import get_active_user_params, populate_entity_params
from ee.clickhouse.queries.util import date_from_clause, get_time_diff, get_trunc_func_ch, parse_timestamps
from posthog.constants import MONTHLY_ACTIVE, WEEKLY_ACTIVE
from posthog.models import Entity
class TrendsEventQuery(ClickhouseEventQuery):
    """Event-level ClickHouse query builder for trends, scoped to a single entity.

    Extends the base event query with entity filtering and trend-specific
    date handling (including the WAU/MAU lookback window).
    """

    _entity: Entity

    def __init__(self, entity: Entity, *args, **kwargs):
        self._entity = entity
        super().__init__(*args, **kwargs)

    def get_query(self) -> Tuple[str, Dict[str, Any]]:
        """Return the SQL string plus the accumulated bound parameters."""
        # Base columns; person-related columns are added only when their joins exist.
        selected = [
            f"{self.EVENT_TABLE_ALIAS}.timestamp as timestamp, {self.EVENT_TABLE_ALIAS}.properties as properties"
        ]
        if self._should_join_distinct_ids:
            selected.append(f", {self.DISTINCT_ID_TABLE_ALIAS}.person_id as person_id")
        if self._should_join_persons:
            selected.append(f", {self.PERSON_TABLE_ALIAS}.person_props as person_props")
        _fields = "".join(selected)

        date_query, date_params = self._get_date_filter()
        self.params.update(date_params)

        # Property filters come from both the overall filter and the entity itself.
        prop_query, prop_params = self._get_props([*self._filter.properties, *self._entity.properties])
        self.params.update(prop_params)

        entity_query, entity_params = self._get_entity_query()
        self.params.update(entity_params)

        query = f"""
            SELECT {_fields} FROM events {self.EVENT_TABLE_ALIAS}
            {self._get_disintct_id_query()}
            {self._get_person_query()}
            WHERE team_id = %(team_id)s
            {entity_query}
            {date_query}
            {prop_query}
        """
        return query, self.params

    def _determine_should_join_persons(self) -> None:
        super()._determine_should_join_persons()
        # Entity-level person-property filters also require both person joins.
        if any(prop.type == "person" for prop in self._entity.properties):
            self._should_join_distinct_ids = True
            self._should_join_persons = True

    def _determine_should_join_distinct_ids(self) -> None:
        # DAU math counts unique persons, which needs the distinct-id join.
        if self._entity.math == "dau":
            self._should_join_distinct_ids = True

    def _get_date_filter(self) -> Tuple[str, Dict]:
        """Build the date-range WHERE fragment and its query parameters."""
        interval_annotation = get_trunc_func_ch(self._filter.interval)
        _, _, round_interval = get_time_diff(
            self._filter.interval or "day", self._filter.date_from, self._filter.date_to, team_id=self._team_id
        )
        _, parsed_date_to, date_params = parse_timestamps(filter=self._filter, team_id=self._team_id)
        parsed_date_from = date_from_clause(interval_annotation, round_interval)

        # Exposed as attributes for callers that need the raw clauses.
        self.parsed_date_from = parsed_date_from
        self.parsed_date_to = parsed_date_to

        if self._entity.math in (WEEKLY_ACTIVE, MONTHLY_ACTIVE):
            # WAU/MAU look back over a wider window than the charted range.
            format_params = get_active_user_params(self._filter, self._entity, self._team_id)
            self.active_user_params = format_params
            date_filter = "{parsed_date_from_prev_range} {parsed_date_to}".format(
                **format_params, parsed_date_to=parsed_date_to
            )
        else:
            date_filter = f"{parsed_date_from} {parsed_date_to}"

        return date_filter, date_params

    def _get_entity_query(self) -> Tuple[str, Dict]:
        # Delegates to the shared helper so entity SQL stays consistent across queries.
        entity_params, entity_format_params = populate_entity_params(self._entity)
        return entity_format_params["entity_query"], entity_params
|
from dataclasses import dataclass, field
from abc import ABC,abstractmethod
from os import error
from Séquence import ErrorBid
from Consts import SUITS,LEVELS
@dataclass
class Card(ABC):
    """A single playing card of a given level (rank).

    Attributes:
        sort_index: Numeric rank (2..14) derived from ``level``; used for ordering.
        level: One of LEVELS ('2'..'9', 'T', 'J', 'Q', 'K', 'A').
        hcp_value: High-card-point value (J=1, Q=2, K=3, A=4; others keep the
            value passed in, defaulting to 0).
    """

    # Honour levels mapped to their high-card-point values (unannotated on
    # purpose: a plain class attribute, not a dataclass field).
    _HCP_BY_LEVEL = {'J': 1, 'Q': 2, 'K': 3, 'A': 4}

    sort_index: int = field(init=False, repr=False)
    level: str
    hcp_value: int = 0

    def __post_init__(self):
        """Validate the level, then derive sort_index and hcp_value.

        Raises:
            ErrorBid: If ``level`` is not one of the allowed LEVELS.
        """
        if self.level not in LEVELS:
            raise ErrorBid("Invalid level (must be in 2 3 4 5 6 7 8 9 T J Q K A)")
        # +2 so that level '2' sorts as 2, ..., 'A' as 14.
        self.sort_index = LEVELS.index(self.level) + 2
        # Dict lookup replaces the original chain of independent ifs; a
        # non-honour card keeps whatever hcp_value the caller supplied.
        self.hcp_value = self._HCP_BY_LEVEL.get(self.level, self.hcp_value)

    def __str__(self):
        return self.level

    def __lt__(self, other):
        return self.sort_index < other.sort_index
@dataclass
class Spade(Card):
    """Spade-suit card; string form (the level) is inherited from Card,
    removing the byte-identical ``__str__`` override the original duplicated."""
@dataclass
class Heart(Card):
    """Heart-suit card; string form (the level) is inherited from Card,
    removing the byte-identical ``__str__`` override the original duplicated."""
@dataclass
class Diamond(Card):
    """Diamond-suit card; string form (the level) is inherited from Card,
    removing the byte-identical ``__str__`` override the original duplicated."""
@dataclass
class Club(Card):
    """Card in the club suit."""
    def __str__(self):
        # Same rendering as Card; kept explicit for symmetry with the other suits.
        return self.level
# The full 52-card deck: for every level, one card of each suit
# (spades, hearts, diamonds, clubs — same order as the original loop).
total_deck = [
    suit_class(level)
    for level in LEVELS
    for suit_class in (Spade, Heart, Diamond, Club)
]
@dataclass
class Hand():
    """Contain one hand of up to 13 cards, kept in four per-suit lists."""
    spades: list[Spade] = field(default_factory=list)
    hearts: list[Heart] = field(default_factory=list)
    diamonds: list[Diamond] = field(default_factory=list)
    clubs: list[Club] = field(default_factory=list)
    hcp_value: int = field(init=False, repr=False)  # total high-card points of the hand

    def __post_init__(self):
        # Sum the high-card points over every card in every suit.
        self.hcp_value = sum(
            card.hcp_value for suit in self.get_every_suit() for card in suit
        )

    def clear(self):
        """Empty every suit in place; returns self for chaining."""
        for suit in self.get_every_suit():
            suit.clear()
        return self

    def get_every_suit(self) -> list[list[Card]]:
        """Return the four suit lists, in spades/hearts/diamonds/clubs order."""
        return [self.spades, self.hearts, self.diamonds, self.clubs]

    def get_spades_as_text(self) -> str:
        """Return the spade levels concatenated, e.g. 'KQ7'."""
        return ''.join(str(card) for card in self.spades)

    def get_hearts_as_text(self) -> str:
        """Return the heart levels concatenated."""
        return ''.join(str(card) for card in self.hearts)

    def get_diamonds_as_text(self) -> str:
        """Return the diamond levels concatenated."""
        return ''.join(str(card) for card in self.diamonds)

    def get_clubs_as_text(self) -> str:
        """Return the club levels concatenated."""
        return ''.join(str(card) for card in self.clubs)

    def len(self) -> int:
        """Return the number of cards in the hand (13 for a complete hand)."""
        return sum(len(suit) for suit in self.get_every_suit())

    def create_from_string(self, string: str):  # return self
        """Create a hand from a string with the following syntax '752.Q864.84.AT62'"""
        self.clear()
        # Dot-separated fields map positionally onto spades/hearts/diamonds/clubs;
        # zip() silently ignores missing or extra fields, like the original index test.
        suit_classes = (Spade, Heart, Diamond, Club)
        for target, suit_class, levels in zip(
                self.get_every_suit(), suit_classes, string.split('.')):
            for level in levels:
                target.append(suit_class(level))
        self.order()
        return self

    def init_from_lin(self, string: str):  # return self
        """Create a hand from a string with the following syntax SK7HAQT632DK4CQ62"""
        # Replace each suit marker with a space so split() yields one chunk
        # of levels per non-void suit.
        tab_of_suit = string.replace('S', ' ').replace('H', ' ').replace('D', ' ').replace('C', ' ').split()
        i = 0
        # Each guard checks the suit is present AND not void (a suit letter
        # immediately followed by another suit letter marks a void suit), so
        # chunk i always lines up with the right suit.
        # NOTE(review): a suit letter as the very last character would make
        # string[find+1] raise IndexError — presumably never happens in LIN input.
        if 'S' in string and string[string.find('S') + 1] not in SUITS:
            for card in tab_of_suit[i]:
                self.spades.append(Spade(card))
            i += 1
        if 'H' in string and string[string.find('H') + 1] not in SUITS:
            for card in tab_of_suit[i]:
                self.hearts.append(Heart(card))
            i += 1
        if 'D' in string and string[string.find('D') + 1] not in SUITS:
            for card in tab_of_suit[i]:
                self.diamonds.append(Diamond(card))
            i += 1
        if string[-1] != 'C':  # trailing 'C' means a void club suit
            for card in tab_of_suit[i]:
                self.clubs.append(Club(card))
        self.order()
        return self

    def order(self) -> None:
        """Sort every suit from highest card to lowest."""
        for suit in self.get_every_suit():
            suit.sort(reverse=True)

    def __str__(self):
        # One line of levels per suit, each terminated by a newline.
        return ''.join(
            ''.join(str(card) for card in suit) + '\n'
            for suit in self.get_every_suit()
        )

    def print_as_lin(self) -> str:
        """Serialize to LIN format, e.g. 'SK7HAQT632DK4CQ62'."""
        return ''.join(
            SUITS[i] + ''.join(str(card) for card in suit)
            for i, suit in enumerate(self.get_every_suit())
        )

    def print_as_pbn(self) -> str:
        """Serialize to PBN format, e.g. 'K7.AQT632.K4.Q62'."""
        return '.'.join(
            ''.join(str(card) for card in suit)
            for suit in self.get_every_suit()
        )

    def append(self, card: Card):
        """Add *card* to the suit matching its exact type; other types are ignored."""
        suit_by_type = {
            Spade: self.spades,
            Heart: self.hearts,
            Diamond: self.diamonds,
            Club: self.clubs,
        }
        target = suit_by_type.get(type(card))
        if target is not None:
            target.append(card)
@dataclass
class Diagramm():
    """A full deal: the four hands (south, north, west, east)."""
    south: Hand = field(init=False)
    north: Hand = field(init=False)
    west: Hand = field(init=False)
    east: Hand = field(init=False)

    def __str__(self):
        # Hands rendered N, S, W, E, separated by blank lines.
        string = ""
        for hand in [self.north, self.south, self.west, self.east]:
            string += hand.__str__() + "\n"
        return string

    def clear(self):  # return self
        """Empty all four hands in place; returns self for chaining."""
        for hand in [self.north, self.south, self.west, self.east]:
            hand.clear()
        return self

    def init_from_pbn(self, string: str, dealer: str):
        """ Create a diagramm from this syntax : 'N:752.Q864.84.AT62 A98.AT9.Q753.J98 KT.KJ73.JT.K7543 QJ643.52.AK962.Q'"""
        # NOTE(review): strips 3 leading and 2 trailing characters — looks like
        # the expected input carries quote characters around the 'N:' payload;
        # confirm against the actual PBN strings passed in.
        string = string[3:-2]
        hand_list = string.split(" ")
        # Hands are listed clockwise starting from the first seat in the string.
        self.north = Hand().create_from_string(hand_list[0])
        self.east = Hand().create_from_string(hand_list[1])
        self.south = Hand().create_from_string(hand_list[2])
        self.west = Hand().create_from_string(hand_list[3])
        # Rotate seats so the hands end up oriented relative to the dealer.
        if dealer == "N":
            pass
        if dealer == "E":
            self.rotate(1)
        if dealer == "S":
            self.rotate(2)
        if dealer == "W":
            self.rotate(3)
        return self

    def init_from_lin(self, string: str, dealer: str):
        """Create a diagramm from this syntax : SK7HAQT632DK4CQ62,S82H98DAT632CKT43,S965HKJ5DQJ985CA5"""
        # LIN deals list only three hands (S, W, N); the fourth (E) is deduced
        # from the full deck by auto_complete().  NOTE(review): the 'dealer'
        # parameter is currently unused here — confirm whether rotation is needed.
        hand_list = string.split(",")
        self.south = Hand().init_from_lin(hand_list[0])
        self.west = Hand().init_from_lin(hand_list[1])
        self.north = Hand().init_from_lin(hand_list[2])
        self.east = Hand().clear()
        self.auto_complete()
        return self

    def rotate(self, rotato: int):
        """Rotate the seats by *rotato* quarter turns (in S→W→N→E order)."""
        temp = []
        for hand in [self.south, self.west, self.north, self.east]:
            temp.append(hand)
        for i in range(rotato):
            temp.insert(0, temp.pop())
        self.south = temp[0]
        self.west = temp[1]
        self.north = temp[2]
        self.east = temp[3]

    def is_valid(self) -> bool:
        """Check the deal holds exactly 52 distinct cards (prints diagnostics)."""
        list_of_cards = []
        for hand in [self.north, self.south, self.west, self.east]:
            for suit in hand.get_every_suit():
                for card in suit:
                    if card in list_of_cards:
                        print('Cette carte est en double !', card)
                        return False
                    else:
                        list_of_cards.append(card)
        if len(list_of_cards) == 52:
            return True
        else:
            print('Le diagramme contient ', len(list_of_cards), ' cartes')
            return False

    def missing_cards(self) -> list:
        """Return the cards of the full deck absent from the deal; raise on duplicates."""
        list_of_cards = []
        for hand in [self.north, self.south, self.west, self.east]:
            for suit in hand.get_every_suit():
                for card in suit:
                    if card in list_of_cards:
                        print("Cette carte est en double", card)
                        # 'error' is os.error (alias of OSError), imported at module top.
                        raise error("Diagramme invalide")
                    else:
                        list_of_cards.append(card)
        missing_cards = []
        for card in total_deck:
            if card not in list_of_cards:
                missing_cards.append(card)
        return missing_cards

    def auto_complete(self) -> None:
        """Deal the missing cards out so every hand reaches 13 cards."""
        missing_cards = self.missing_cards()
        for hand in [self.north, self.south, self.west, self.east]:
            while hand.len() < 13:
                hand.append(missing_cards.pop())
        if not self.is_valid():
            raise error("L'auto-complete n'est pas valide")

    def print_as_lin(self) -> str:
        """Serialize the deal to LIN: hands S, W, N, E joined by commas."""
        string = ""
        for hand in [self.south, self.west, self.north, self.east]:
            string += hand.print_as_lin()
            string += ","
        return string[:-1]

    def print_as_pbn(self) -> str:
        """Serialize the deal to PBN, clockwise from South ('S:...')."""
        string = "S:"
        for hand in [self.south, self.west, self.north, self.east]:
            string += hand.print_as_pbn()
            string += " "
        return string[:-1]
if __name__ == '__main__':
    # Nothing to run directly; this module only defines card/hand/deal classes.
    pass
|
# Packaging configuration for the pytorch_tps distribution.
from setuptools import setup

setup(
    name='pytorch_tps',
    description='Thin plate spline interpolation for PyTorch',
    version="0.0.1",
    author='Yucheol Jung',
    author_email='ycjung@postech.ac.kr',
    packages=['pytorch_tps'],
    url='https://github.com/ycjungSubhuman/pytorch_tps',
)
|
# Copyright (c) 2010 Leif Johnson <leif@leifjohnson.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A Python library for reading and writing C3D files.
Header class
============
.. autoclass:: Header
:members:
Group class
===========
.. autoclass:: Group
   :members:
Manager class
=============
.. autoclass:: Manager
:members:
Reader class
=============
.. autoclass:: Reader
:members:
Writer class
============
.. autoclass:: Writer
:members:
Manager class
=============
.. autoclass:: Manager
:members:
"""
import array
import numpy
import struct
import logging
import operator
import cStringIO
class Header(object):
    '''Header information from a C3D file.

    Holds the fixed fields of the 512-byte binary header block.
    (Python 2 code: the struct calls below pack/unpack str, not bytes.)
    '''

    # Layout of the 512-byte header block. Native struct alignment happens to
    # produce the required offsets here (2B + 5H + f + 2H + f + 270s + 2H +
    # 214s = 512 bytes); an explicit '<' prefix would make that guarantee
    # independent of the platform — TODO confirm before touching the format.
    BINARY_FORMAT = 'BBHHHHHfHHf270sHH214s'

    def __init__(self, handle=None):
        # Defaults for a freshly-created file; read(handle) overwrites them all.
        self.label_block = 0        # block index of the label data
        self.parameter_block = 2    # block index of the parameter section
        self.data_block = 3         # block index of the frame data
        self.point_count = 50
        self.analog_count = 0
        self.first_frame = 1
        self.last_frame = 1
        self.sample_per_frame = 0
        self.frame_rate = 60.0
        self.max_gap = 0
        self.scale_factor = -1.0    # negative scale marks float (not int) frame data
        self.long_event_labels = False
        if handle:
            self.read(handle)

    def write(self, handle):
        '''Write binary header data to a file handle.
        This method writes exactly 512 bytes to the beginning of the file.
        '''
        handle.seek(0)
        handle.write(struct.pack(self.BINARY_FORMAT,
                                 self.parameter_block,
                                 0x50,  # C3D file-type marker byte
                                 self.point_count,
                                 self.analog_count,
                                 self.first_frame,
                                 self.last_frame,
                                 self.max_gap,
                                 self.scale_factor,
                                 self.data_block,
                                 self.sample_per_frame,
                                 self.frame_rate,
                                 '',
                                 # 0x3039 (12345) flags the presence of long
                                 # event labels — presumably per the C3D spec.
                                 self.long_event_labels and 0x3039 or 0x0,
                                 self.label_block,
                                 ''))
        logging.info('''wrote C3D header information:
parameter_block: %(parameter_block)s
point_count: %(point_count)s
analog_count: %(analog_count)s
first_frame: %(first_frame)s
last_frame: %(last_frame)s
max_gap: %(max_gap)s
scale_factor: %(scale_factor)s
data_block: %(data_block)s
sample_per_frame: %(sample_per_frame)s
frame_rate: %(frame_rate)s
long_event_labels: %(long_event_labels)s
label_block: %(label_block)s''' % self.__dict__)

    def read(self, handle):
        '''Read and parse binary header data from a file handle.
        This method reads exactly 512 bytes from the beginning of the file.
        '''
        handle.seek(0)
        # Fields unpack in the same order write() packs them; '_' slots are
        # the magic byte and the two reserved padding strings.
        (self.parameter_block,
         _,
         self.point_count,
         self.analog_count,
         self.first_frame,
         self.last_frame,
         self.max_gap,
         self.scale_factor,
         self.data_block,
         self.sample_per_frame,
         self.frame_rate,
         _,
         self.long_event_labels,
         self.label_block,
         _) = struct.unpack(self.BINARY_FORMAT, handle.read(512))
        logging.info('''loaded C3D header information:
parameter_block: %(parameter_block)s
point_count: %(point_count)s
analog_count: %(analog_count)s
first_frame: %(first_frame)s
last_frame: %(last_frame)s
max_gap: %(max_gap)s
scale_factor: %(scale_factor)s
data_block: %(data_block)s
sample_per_frame: %(sample_per_frame)s
frame_rate: %(frame_rate)s
long_event_labels: %(long_event_labels)s
label_block: %(label_block)s''' % self.__dict__)
class Param(object):
    '''We represent a single named parameter from a C3D file.

    (Python 2 code: uses the builtin reduce and xrange.)
    '''

    def __init__(self,
                 name,
                 desc='',
                 data_size=1,
                 dimensions=None,
                 bytes=None,
                 handle=None):
        '''Set up a new parameter with at least a name.
        name: The name of the parameter.
        desc: The description of the parameter.
        data_size: The number of bytes that are in the binary representation of
          the data for this parameter. Use -1 if this parameter represents
          string data.
        dimensions: The dimensions of the data for this parameter. This is
          primarily used for string data ; if you want to use strings, they must
          all be passed in the "bytes" variable as a space-delimited string.
          Each field in the string must be the same length, and that length must
          be passed in the dimensions list.
        bytes: The raw bytes for this parameter. Use struct.pack() to construct
          this value, or just pass the raw string data for string parameters.
        handle: If provided, the data for the parameter will be read from this
          file handle.
        '''
        self.name = name
        self.desc = desc
        # A negative data_size on disk marks string data; store the sign
        # separately so the magnitude can be used for sizing.
        self.negative_data_size = data_size < 0
        self.data_size = abs(data_size)
        self.dimensions = dimensions or []
        self.bytes = bytes
        if handle:
            self.read(handle)

    def __repr__(self):
        return '<Param: %s>' % self.desc

    def binary_size(self):
        '''Return the number of bytes needed to store this parameter.'''
        return (
            1 + # group_id
            2 + # next offset marker
            1 + len(self.name) + # size of name and name bytes
            1 + # data size
            1 + len(self.dimensions) + # size of dimensions and dimension bytes
            reduce(operator.mul, self.dimensions, 1) * self.data_size + # data
            1 + len(self.desc) # size of desc and desc bytes
        )

    def write(self, handle):
        '''Write binary data for this parameter to a file handle.
        This writes data at the current position in the file.

        Note: the caller (Writer._write_group) writes the group id, name and
        offset marker first; this method writes the remainder of the record.
        '''
        size = self.data_size
        if self.negative_data_size:
            size = -size  # restore the on-disk string-data marker
        handle.write(struct.pack('b', size))
        handle.write(struct.pack('B', len(self.dimensions)))
        handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
        if self.bytes:
            handle.write(self.bytes)
        handle.write(struct.pack('B', len(self.desc)))
        handle.write(self.desc)
        logging.info('''wrote C3D parameter information:
name: %(name)s
desc: %(desc)s
data_size: %(data_size)s
dimensions: %(dimensions)s
bytes: %(bytes)r''' % self.__dict__)

    def read(self, handle):
        '''Read binary data for this parameter from a file handle.
        This reads exactly enough data from the current position in the file to
        initialize the parameter.

        Note: the name and offset marker were already consumed by the caller
        (Reader._read_metadata); reading starts at the data-size byte.
        '''
        self.data_size, = struct.unpack('b', handle.read(1))
        if self.data_size < 0:
            self.negative_data_size = True
            self.data_size = abs(self.data_size)
        count, = struct.unpack('B', handle.read(1))
        self.dimensions = [
            struct.unpack('B', handle.read(1))[0] for _ in xrange(count)]
        # Total element count is the product of all dimensions.
        count = reduce(operator.mul, self.dimensions, 1)
        self.bytes = None
        if self.data_size * count:
            self.bytes = handle.read(self.data_size * count)
        else:
            logging.debug('zero data_size * count !')
        size, = struct.unpack('B', handle.read(1))
        self.desc = size and handle.read(size) or ''
        logging.info('''loaded C3D parameter information:
name: %(name)s
desc: %(desc)s
data_size: %(data_size)s
dimensions: %(dimensions)s
bytes: %(bytes)r''' % self.__dict__)
class Group(object):
    '''A group of parameters from a C3D file.'''

    def __init__(self, name=None, desc=None):
        self.name = name
        self.desc = desc
        self.params = {}

    def __repr__(self):
        return '<Group: %s>' % self.desc

    def add_param(self, name, **kwargs):
        '''Create a Param (keyed and named by the upper-cased name).'''
        key = name.upper()
        self.params[key] = Param(key, **kwargs)
        logging.info('added parameter %s: %s', name, kwargs)

    def binary_size(self):
        '''Return the number of bytes to store this group and its parameters.'''
        param_bytes = sum(p.binary_size() for p in self.params.itervalues())
        # 1 group_id + (1 + name) length-prefixed name + 2 offset marker
        # + (1 + desc) length-prefixed description + all parameter records.
        return 1 + 1 + len(self.name) + 2 + 1 + len(self.desc) + param_bytes

    def _unpack(self, fmt, key):
        '''Decode the first value of the named parameter with a struct format.'''
        return struct.unpack(fmt, self.params[key].bytes)[0]

    def get_int8(self, key):
        return self._unpack('b', key)

    def get_uint8(self, key):
        return self._unpack('B', key)

    def get_int16(self, key):
        return self._unpack('h', key)

    def get_uint16(self, key):
        return self._unpack('H', key)

    def get_int32(self, key):
        return self._unpack('i', key)

    def get_uint32(self, key):
        return self._unpack('I', key)

    def get_float(self, key):
        return self._unpack('f', key)

    def get_string(self, key, offset=0):
        '''Return the offset-th whitespace-delimited field of a string param.'''
        return self.params[key].bytes.split()[offset]
class Manager(object):
    '''A base class for managing C3D file metadata.'''

    def __init__(self, header=None, groups=None):
        self.header = header or Header()
        # Maps BOTH numeric group ids and upper-case group names to the same
        # Group objects (see check_group) — hence the len(...) // 2 group
        # count logged by Reader._read_metadata.
        self._groups = groups or {}

    def check_group(self, group_id, name=None, desc=None):
        '''Add a new parameter group.

        Gets (or creates) the group registered under the numeric group_id;
        when a name is supplied, also registers the group under the
        upper-cased name and updates its name/desc.
        '''
        group = self._groups.get(group_id)
        if group is None:
            logging.info('added C3D parameter group #%d: %s: %s',
                         group_id, name, desc)
            group = self._groups[group_id] = Group(name, desc)
        else:
            logging.info('using C3D parameter group %s: %s',
                         group.name, group.desc)
        if name is not None:
            name = name.upper()
            if name in self._groups:
                raise NameError('group name %s was used more than once' % name)
            self._groups[name] = group
            group.name = name
            group.desc = desc
        return group

    def group(self, name):
        '''Get the parameter group with a given name.'''
        return self._groups.get(name.upper(), None)

    def groups(self):
        '''Get all the (name, group) pairs in our file.'''
        # Python 2 dict iterator; yields both id-keyed and name-keyed entries.
        return self._groups.iteritems()

    def parameter_blocks(self):
        '''Compute the size (in 512B blocks) of the parameter section.'''
        bytes = 4  # the 4-byte parameter-section header
        # NOTE(review): every group appears twice in _groups (under its id AND
        # its name), so each group's binary_size() is summed twice here —
        # confirm whether this over-allocation is intentional.
        for name, group in self._groups.iteritems():
            bytes += group.binary_size()
        blocks, overflow = divmod(bytes, 512)
        if overflow:
            blocks += 1  # round up to a whole 512-byte block
        return blocks

    def frame_rate(self):
        return self.header.frame_rate

    def num_points(self):
        return self.group('POINT').get_uint16('USED')

    def num_analog(self):
        return self.group('ANALOG').get_uint16('USED')

    def points_per_frame(self):
        return self.group('POINT').get_uint16('USED')

    def analog_per_frame(self):
        return self.group('ANALOG').get_uint16('USED')

    def start_field(self):
        return self.group('TRIAL').get_uint32('ACTUAL_START_FIELD')

    def end_field(self):
        return self.group('TRIAL').get_uint32('ACTUAL_END_FIELD')
class Reader(Manager):
    '''This class provides methods for reading the data in a C3D file.
    A C3D file contains metadata and frame-based data describing 3D motion.
    You can iterate over the frames in the file by calling
    "read_frames(handle)" after construction:
    >>> r = c3d.Reader(open('capture.c3d', 'rb'))
    >>> for points, analog in r.read_frames():
    ...     print points.shape, 'points in this frame'
    ...     frames.append(points, analog)

    (Python 2 code: uses cStringIO and xrange.)
    '''

    def __init__(self, handle):
        '''Initialize this C3D file by reading header and parameter data.

        handle: A binary-mode file handle positioned at the start of the file.
        '''
        super(Reader, self).__init__(Header(handle))
        self._handle = handle
        self._read_metadata()

    def _read_metadata(self):
        '''Read group and parameter data from our file handle.'''
        self._handle.seek((self.header.parameter_block - 1) * 512)
        # metadata header: 2 reserved bytes, block count, processor type
        buf = self._handle.read(4)
        _, _, parameter_blocks, processor = struct.unpack('BBBB', buf)
        if processor != 84:
            # 84 is the Intel/PC processor type; DEC/MIPS byte orders are
            # not supported by this reader.
            raise ValueError('We only read Intel C3D files.')
        # read all metadata in a chunk, then process each chunk (to avoid block
        # boundary issues).
        bytes = self._handle.read(512 * parameter_blocks)
        while bytes:
            buf = cStringIO.StringIO(bytes)
            chars_in_name, group_id = struct.unpack('bb', buf.read(2))
            if group_id == 0 or chars_in_name == 0:
                break  # end-of-parameters sentinel
            name = buf.read(abs(chars_in_name)).upper()
            offset_to_next, = struct.unpack('h', buf.read(2))
            # A negative group id introduces a group record; a positive id
            # introduces a parameter belonging to that group.
            if group_id < 0:
                group_id = abs(group_id)
                size, = struct.unpack('B', buf.read(1))
                desc = size and buf.read(size) or ''
                g = self.check_group(group_id, name, desc)
                logging.debug('%s group takes up %d bytes', name,
                              g.binary_size())
            else:
                g = self.check_group(group_id)
                g.add_param(name, handle=buf)
                logging.debug('%s parameter takes up %d bytes', name,
                              g.params[name].binary_size())
            # offset_to_next is relative to the position just after the name.
            bytes = bytes[2 + abs(chars_in_name) + offset_to_next:]
        logging.debug('consumed %d bytes of metadata',
                      512 * parameter_blocks - len(bytes))
        # each group is registered twice (id and name), hence // 2.
        logging.info('read %d parameter groups', len(self._groups) // 2)

    def read_frames(self):
        '''Iterate over the data frames from our C3D file handle.
        This generates a sequence of (points, analog) ordered pairs, one
        ordered pair per frame. The first element of each frame contains a numpy
        array of 4D "points" and the second element of each frame contains a
        numpy array of 1D "analog" values that were probably recorded
        simultaneously. The four dimensions in the point data are typically
        (x, y, z) and a "confidence" estimate for the point.
        '''
        # find out where we seek to start reading frame data.
        start_block = self.group('POINT').get_uint16('DATA_START')
        if start_block != self.header.data_block:
            logging.info('start_block %d != data_block %d',
                         start_block, self.header.data_block)
        # read frame and analog data in either float or int format:
        # a negative POINT:SCALE marks float storage.
        format = 'fi'[self.group('POINT').get_float('SCALE') >= 0]
        ppf = self.points_per_frame()
        apf = self.analog_per_frame()
        self._handle.seek((self.header.data_block - 1) * 512)
        start = self._handle.tell()
        f = 0
        for f in xrange(self.end_field() - self.start_field() + 1):
            points = array.array(format)
            points.fromfile(self._handle, 4 * ppf)
            analog = array.array(format)
            analog.fromfile(self._handle, apf)
            yield (numpy.array(points).reshape((ppf, 4)), numpy.array(analog))
            if f and not f % 10000:
                logging.debug('consumed %d frames in %dkB of frame data',
                              f, (self._handle.tell() - start) / 1000)
        logging.info('iterated over %d frames', f)
class Writer(Manager):
    '''This class manages the task of writing metadata and frames to a C3D file.
    >>> r = c3d.Reader(open('data.c3d', 'rb'))
    >>> frames = smooth_frames(r.read_frames())
    >>> w = c3d.Writer(open('smoothed.c3d', 'wb'))
    >>> w.write_from_reader(frames, r)
    '''

    def __init__(self, handle):
        '''handle: A binary-mode, writable file handle.'''
        super(Writer, self).__init__()
        self._handle = handle

    def _pad_block(self):
        '''Pad the file with 0s to the end of the next block boundary.'''
        extra = self._handle.tell() % 512
        if extra:
            logging.debug('padding with %d zeros', 512 - extra)
            self._handle.write('\x00' * (512 - extra))

    def write_metadata(self):
        '''Write metadata for this file to our file handle.'''
        # header
        self.header.write(self._handle)
        self._pad_block()
        assert self._handle.tell() == 512
        logging.debug('produced %d bytes of header data', self._handle.tell())
        # groups: parameter-section header is 2 reserved bytes, block count,
        # processor type 84 (Intel).
        self._handle.write(struct.pack('BBBB', 0, 0, self.parameter_blocks(), 84))
        # _groups holds each group under both its int id and its name; keep
        # only the int-keyed entries, in id order.
        id_groups = sorted((i, g) for i, g in self.groups() if isinstance(i, int))
        for group_id, group in id_groups:
            self._write_group(group_id, group)
        # padding up to the first data block
        self._pad_block()
        while self._handle.tell() != 512 * (self.header.data_block - 1):
            self._handle.write('\x00' * 512)
        logging.debug('produced %d bytes of metadata', self._handle.tell())

    def _write_group(self, group_id, group):
        '''Write a single parameter group, with parameters, to our file handle.
        group_id: The numerical ID of the group.
        group: The Group object to write to the handle.
        '''
        logging.info('writing C3D parameter group #%d: %s: %s',
                     group_id, group.name, group.desc)
        # A negative id on disk marks a group record (vs. a parameter record).
        self._handle.write(struct.pack('bb', len(group.name), -group_id))
        self._handle.write(group.name)
        # offset to the next record = desc length byte + desc + 2 offset bytes
        self._handle.write(struct.pack('h', 3 + len(group.desc)))
        self._handle.write(struct.pack('B', len(group.desc)))
        self._handle.write(group.desc)
        logging.debug('writing group info yields offset %d', self._handle.tell())
        for name, param in group.params.iteritems():
            self._handle.write(struct.pack('bb', len(name), group_id))
            self._handle.write(name)
            # offset to the next record, measured from just after the name.
            self._handle.write(struct.pack('h', param.binary_size() - 2 - len(name)))
            param.write(self._handle)
            logging.debug('writing %d bytes yields offset %d',
                          4 + len(name) + param.binary_size(), self._handle.tell())
        logging.debug('group %s ends at byte offset %d',
                      group.name, self._handle.tell())

    def write_frames(self, frames):
        '''Write the given list of frame data to our file handle.
        frames: A sequence of (points, analog) tuples, each containing data for
          one frame.
        '''
        assert self._handle.tell() == 512 * (self.header.data_block - 1)
        # negative POINT:SCALE marks float storage, otherwise int.
        format = 'fi'[self.group('POINT').get_float('SCALE') >= 0]
        for p, a in frames:
            point = array.array(format)
            point.extend(p.flatten())
            point.tofile(self._handle)
            analog = array.array(format)
            analog.extend(a)
            analog.tofile(self._handle)
        self._pad_block()

    def write_like_phasespace(self, frames, frame_count,
                              point_frame_rate=480.0,
                              analog_frame_rate=0.0,
                              point_scale_factor=-1.0,
                              point_units='mm ',
                              gen_scale=1.0,
                              ):
        '''Write a set of frames to a file so it looks like Phasespace wrote it.
        frames: The sequence of frames to write.
        frame_count: The number of frames to write.
        point_frame_rate: The frame rate of the data.
        analog_frame_rate: The number of analog samples per frame.
        point_scale_factor: The scale factor for point data.
        point_units: The units that the point numbers represent.
        '''
        # Peek at the first frame to size the POINT/ANALOG groups.
        # NOTE(review): iter(frames).next() consumes the first frame when
        # frames is a generator, so it would be missing from write_frames
        # below — confirm callers always pass a re-iterable sequence.
        try:
            points, analog = iter(frames).next()
        except StopIteration:
            return
        # POINT group
        ppf = len(points)
        point_group = self.check_group(1, 'POINT', 'POINT group')
        point_group.add_param('USED', desc='Number of 3d markers',
                              data_size=2,
                              bytes=struct.pack('H', ppf))
        point_group.add_param('FRAMES', desc='frame count',
                              data_size=2,
                              bytes=struct.pack('H', min(65535, frame_count)))
        # DATA_START is a placeholder here; patched below once the parameter
        # section size is known.
        point_group.add_param('DATA_START', desc='data block number',
                              data_size=2,
                              bytes=struct.pack('H', 0))
        point_group.add_param('SCALE', desc='3d scale factor',
                              data_size=4,
                              bytes=struct.pack('f', point_scale_factor))
        point_group.add_param('RATE', desc='3d data capture rate',
                              data_size=4,
                              bytes=struct.pack('f', point_frame_rate))
        point_group.add_param('X_SCREEN', desc='X_SCREEN parameter',
                              data_size=-1,
                              dimensions=[2],
                              bytes='+X')
        point_group.add_param('Y_SCREEN', desc='Y_SCREEN parameter',
                              data_size=-1,
                              dimensions=[2],
                              bytes='+Z')
        point_group.add_param('UNITS', desc='3d data units',
                              data_size=-1,
                              dimensions=[len(point_units)],
                              bytes=point_units)
        point_group.add_param('LABELS', desc='labels',
                              data_size=-1,
                              dimensions=[5, ppf],
                              bytes=''.join('M%03d ' % i for i in xrange(ppf)))
        point_group.add_param('DESCRIPTIONS', desc='descriptions',
                              data_size=-1,
                              dimensions=[16, ppf],
                              bytes=' ' * 16 * ppf)
        # ANALOG group
        apf = len(analog)
        analog_group = self.check_group(2, 'ANALOG', 'ANALOG group')
        analog_group.add_param('USED', desc='analog channel count',
                               data_size=2,
                               bytes=struct.pack('H', apf))
        analog_group.add_param('RATE', desc='analog frame rate',
                               data_size=4,
                               bytes=struct.pack('f', analog_frame_rate))
        analog_group.add_param('GEN_SCALE', desc='analog general scale factor',
                               data_size=4,
                               bytes=struct.pack('f', gen_scale))
        analog_group.add_param('SCALE', desc='analog channel scale factors',
                               data_size=4,
                               dimensions=[0])
        analog_group.add_param('OFFSET', desc='analog channel offsets',
                               data_size=2,
                               dimensions=[0])
        # TRIAL group: 32-bit frame indices stored as two 16-bit words.
        trial_group = self.check_group(3, 'TRIAL', 'TRIAL group')
        trial_group.add_param('ACTUAL_START_FIELD', desc='actual start frame',
                              data_size=2,
                              dimensions=[2],
                              bytes=struct.pack('I', 1))
        trial_group.add_param('ACTUAL_END_FIELD', desc='actual end frame',
                              data_size=2,
                              dimensions=[2],
                              bytes=struct.pack('I', frame_count))
        # sync parameter information to header.
        blocks = self.parameter_blocks()
        point_group.params['DATA_START'].bytes = struct.pack('H', 2 + blocks)
        self.header.data_block = 2 + blocks
        self.header.frame_rate = point_frame_rate
        self.header.last_frame = min(frame_count, 65535)
        self.header.point_count = ppf
        self.header.analog_count = apf
        self.write_metadata()
        self.write_frames(frames)

    def write_from_reader(self, frames, reader):
        '''Write a file with the same metadata and number of frames as a Reader.
        frames: A sequence of frames to write.
        reader: Copy metadata from this reader to the output file.
        '''
        self.write_like_phasespace(frames, reader.end_field(), reader.frame_rate())
if __name__ == '__main__':
    # Smoke test: parse a local capture file (requires testStatic.c3d on disk).
    r = Reader(open('testStatic.c3d', 'rb'))
|
# -*- coding: utf-8 -*-
import json
import os
import re
from distutils.version import LooseVersion
import pip
from django.core.management.base import BaseCommand, CommandError
try:
from pip._internal.download import PipSession
from pip._internal.req.req_file import parse_requirements
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
# pip < 10
try:
from pip import get_installed_distributions
from pip.download import PipSession
from pip.req import parse_requirements
except ImportError:
raise CommandError("Pip version 6 or higher is required")
from django_extensions.management.color import color_style
from django_extensions.management.utils import signalcommand
try:
from urllib.parse import urlparse
from urllib.error import HTTPError
from urllib.request import Request, urlopen
from xmlrpc.client import ServerProxy
except ImportError:
# Python 2
from urlparse import urlparse # type: ignore
from urllib2 import HTTPError, Request, urlopen # type: ignore
from xmlrpclib import ServerProxy # type: ignore
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
class Command(BaseCommand):
help = "Scan pip requirement files for out-of-date packages."
def add_arguments(self, parser):
    """Register the pipchecker-specific command-line options."""
    super(Command, self).add_arguments(parser)
    parser.add_argument(
        "-t", "--github-api-token", action="store",
        dest="github_api_token", help="A github api authentication token."
    )
    # Fix: a stray trailing comma previously turned this statement into a
    # one-element tuple (harmless but misleading).
    parser.add_argument(
        "-r", "--requirement", action="append", dest="requirements",
        default=[], metavar="FILENAME",
        help="Check all the packages listed in the given requirements "
             "file. This option can be used multiple times."
    )
    parser.add_argument(
        "-n", "--newer", action="store_true", dest="show_newer",
        # Fix: "then" -> "than" typo in the user-facing help text.
        help="Also show when newer version than available is installed."
    )
@signalcommand
def handle(self, *args, **options):
    """Collect requirement files, then check pypi/github/other for updates."""
    self.style = color_style()
    self.options = options
    # Requirement-file discovery order: explicit -r flags first, then
    # requirements.txt, then every *.txt in a requirements/ directory,
    # then requirements-dev.txt / requirements-prod.txt.
    if options["requirements"]:
        req_files = options["requirements"]
    elif os.path.exists("requirements.txt"):
        req_files = ["requirements.txt"]
    elif os.path.exists("requirements"):
        req_files = [
            "requirements/{0}".format(f) for f in os.listdir("requirements")
            if os.path.isfile(os.path.join("requirements", f)) and f.lower().endswith(".txt")
        ]
    elif os.path.exists("requirements-dev.txt"):
        req_files = ["requirements-dev.txt"]
    elif os.path.exists("requirements-prod.txt"):
        req_files = ["requirements-prod.txt"]
    else:
        raise CommandError("Requirements file(s) not found")
    self.reqs = {}
    with PipSession() as session:
        for filename in req_files:
            for req in parse_requirements(filename, session=session):
                # URL-only requirements have no name; fall back to the
                # link's filename as the display key.
                name = req.name if req.name else req.link.filename
                # url attribute changed to link in pip version 6.1.0 and above
                if LooseVersion(pip.__version__) > LooseVersion('6.0.8'):
                    self.reqs[name] = {
                        "pip_req": req,
                        "url": req.link,
                    }
                else:
                    self.reqs[name] = {
                        "pip_req": req,
                        "url": req.url,
                    }
    # Github token: CLI flag wins, then the GITHUB_API_TOKEN env var;
    # without a token the github API allows only ~50 requests per hour.
    if options["github_api_token"]:
        self.github_api_token = options["github_api_token"]
    elif os.environ.get("GITHUB_API_TOKEN"):
        self.github_api_token = os.environ.get("GITHUB_API_TOKEN")
    else:
        self.github_api_token = None  # only 50 requests per hour
    self.check_pypi()
    if HAS_REQUESTS:
        self.check_github()
    else:
        self.stdout.write(self.style.ERROR("Cannot check github urls. The requests library is not installed. ( pip install requests )"))
    self.check_other()
def _urlopen_as_json(self, url, headers=None):
    """Shortcut: fetch *url* and return its contents parsed as JSON.

    Fix: urllib's Request iterates over the headers mapping, so passing the
    default None crashed inside Request; default to an empty dict instead.
    """
    req = Request(url, headers=headers or {})
    return json.loads(urlopen(req).read())
def _is_stable(self, version):
    """Return True unless *version* ends like an alpha/beta/rc/dev release."""
    return re.search(r'([ab]|rc|dev)\d+$', str(version)) is None
def _available_version(self, dist_version, available):
    """Pick the release to report from *available* (newest first).

    When the installed version is stable, prefer the newest stable release;
    otherwise (or when no stable release exists) take the newest overall.
    Returns None when nothing is available.
    """
    candidates = available
    if self._is_stable(dist_version):
        stable = [v for v in candidates if self._is_stable(LooseVersion(v))]
        if stable:
            candidates = stable
    return LooseVersion(candidates[0]) if candidates else None
def check_pypi(self):
    """If the requirement is frozen to pypi, check for a new version."""
    # Attach the installed distribution (if any) to each tracked requirement.
    for dist in get_installed_distributions():
        name = dist.project_name
        if name in self.reqs.keys():
            self.reqs[name]["dist"] = dist
    pypi = ServerProxy("https://pypi.python.org/pypi")
    # Iterate over a snapshot so entries can be deleted as they are handled;
    # whatever remains in self.reqs afterwards is examined by check_github /
    # check_other.
    for name, req in list(self.reqs.items()):
        if req["url"]:
            continue  # skipping github packages.
        elif "dist" in req:
            dist = req["dist"]
            dist_version = LooseVersion(dist.version)
            # Retry with the underscore spelling if the dashed name is unknown.
            available = pypi.package_releases(req["pip_req"].name, True) or pypi.package_releases(req["pip_req"].name.replace('-', '_'), True)
            available_version = self._available_version(dist_version, available)
            if not available_version:
                msg = self.style.WARN("release is not on pypi (check capitalization and/or --extra-index-url)")
            elif self.options['show_newer'] and dist_version > available_version:
                msg = self.style.INFO("{0} available (newer installed)".format(available_version))
            elif available_version > dist_version:
                msg = self.style.INFO("{0} available".format(available_version))
            else:
                # Up-to-date packages are dropped without printing a line.
                msg = "up to date"
                del self.reqs[name]
                continue
            pkg_info = self.style.BOLD("{dist.project_name} {dist.version}".format(dist=dist))
        else:
            msg = "not installed"
            pkg_info = name
        self.stdout.write("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
        del self.reqs[name]
    def check_github(self):
        """
        If the requirement is frozen to a github url, check for new commits.

        API Tokens
        ----------
        For more than 50 github api calls per hour, pipchecker requires
        authentication with the github api by setting the environment
        variable ``GITHUB_API_TOKEN`` or setting the command flag
        ``--github-api-token='mytoken'``.

        To create a github api token for use at the command line::
            curl -u 'rizumu' -d '{"scopes":["repo"], "note":"pipchecker"}' https://api.github.com/authorizations

        For more info on github api tokens:
            https://help.github.com/articles/creating-an-oauth-token-for-command-line-use
            http://developer.github.com/v3/oauth/#oauth-authorizations-api

        Requirement Format
        ------------------
        Pipchecker gets the sha of frozen repo and checks if it is
        found at the head of any branches. If it is not found then
        the requirement is considered to be out of date.

        Therefore, freezing at the commit hash will provide the expected
        results, but if freezing at a branch or tag name, pipchecker will
        not be able to determine with certainty if the repo is out of date.

        Freeze at the commit hash (sha)::
            git+git://github.com/django/django.git@393c268e725f5b229ecb554f3fac02cfc250d2df#egg=Django
            https://github.com/django/django/archive/393c268e725f5b229ecb554f3fac02cfc250d2df.tar.gz#egg=Django
            https://github.com/django/django/archive/393c268e725f5b229ecb554f3fac02cfc250d2df.zip#egg=Django

        Freeze with a branch name::
            git+git://github.com/django/django.git@master#egg=Django
            https://github.com/django/django/archive/master.tar.gz#egg=Django
            https://github.com/django/django/archive/master.zip#egg=Django

        Freeze with a tag::
            git+git://github.com/django/django.git@1.5b2#egg=Django
            https://github.com/django/django/archive/1.5b2.tar.gz#egg=Django
            https://github.com/django/django/archive/1.5b2.zip#egg=Django

        Do not freeze::
            git+git://github.com/django/django.git#egg=Django
        """
        for name, req in list(self.reqs.items()):
            req_url = req["url"]
            if not req_url:
                continue
            req_url = str(req_url)
            # Skip VCS urls that are not hosted on github.
            if req_url.startswith("git") and "github.com/" not in req_url:
                continue
            # Archive downloads are handled via the path parsing below, but bare
            # tarball urls without the github host are skipped here.
            if req_url.endswith((".tar.gz", ".tar.bz2", ".zip")):
                continue
            headers = {
                "content-type": "application/json",
            }
            if self.github_api_token:
                headers["Authorization"] = "token {0}".format(self.github_api_token)
            try:
                # Strip any '#egg=...' fragment and leading/trailing slashes, then
                # split the path into its components.
                path_parts = urlparse(req_url).path.split("#", 1)[0].strip("/").rstrip("/").split("/")
                if len(path_parts) == 2:
                    user, repo = path_parts
                elif 'archive' in path_parts:
                    # Supports URL of format:
                    # https://github.com/django/django/archive/master.tar.gz#egg=Django
                    # https://github.com/django/django/archive/master.zip#egg=Django
                    user, repo = path_parts[:2]
                    repo += '@' + path_parts[-1].replace('.tar.gz', '').replace('.zip', '')
                else:
                    # NOTE(review): the styled message is discarded here — this looks
                    # like a missing self.stdout.write(...) call; confirm intent.
                    self.style.ERROR("\nFailed to parse %r\n" % (req_url, ))
                    continue
            except (ValueError, IndexError) as e:
                self.stdout.write(self.style.ERROR("\nFailed to parse %r: %s\n" % (req_url, e)))
                continue
            try:
                # Probe request used only to validate credentials / rate limit.
                test_auth = requests.get("https://api.github.com/django/", headers=headers).json()
            except HTTPError as e:
                self.stdout.write("\n%s\n" % str(e))
                return
            if "message" in test_auth and test_auth["message"] == "Bad credentials":
                self.stdout.write(self.style.ERROR("\nGithub API: Bad credentials. Aborting!\n"))
                return
            elif "message" in test_auth and test_auth["message"].startswith("API Rate Limit Exceeded"):
                self.stdout.write(self.style.ERROR("\nGithub API: Rate Limit Exceeded. Aborting!\n"))
                return
            # Extract the frozen commit sha (or branch/tag name) from the repo part.
            frozen_commit_sha = None
            if ".git" in repo:
                repo_name, frozen_commit_full = repo.split(".git")
                if frozen_commit_full.startswith("@"):
                    frozen_commit_sha = frozen_commit_full[1:]
            elif "@" in repo:
                repo_name, frozen_commit_sha = repo.split("@")
            if frozen_commit_sha is None:
                msg = self.style.ERROR("repo is not frozen")
            if frozen_commit_sha:
                # A sha found at the head of any branch means the repo is current.
                branch_url = "https://api.github.com/repos/{0}/{1}/branches".format(user, repo_name)
                branch_data = requests.get(branch_url, headers=headers).json()
                frozen_commit_url = "https://api.github.com/repos/{0}/{1}/commits/{2}".format(
                    user, repo_name, frozen_commit_sha
                )
                frozen_commit_data = requests.get(frozen_commit_url, headers=headers).json()
                if "message" in frozen_commit_data and frozen_commit_data["message"] == "Not Found":
                    msg = self.style.ERROR("{0} not found in {1}. Repo may be private.".format(frozen_commit_sha[:10], name))
                elif frozen_commit_data["sha"] in [branch["commit"]["sha"] for branch in branch_data]:
                    msg = self.style.BOLD("up to date")
                else:
                    msg = self.style.INFO("{0} is not the head of any branch".format(frozen_commit_data["sha"][:10]))
            if "dist" in req:
                pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
            elif frozen_commit_sha is None:
                pkg_info = name
            else:
                pkg_info = "{0} {1}".format(name, frozen_commit_sha[:10])
            self.stdout.write("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
            del self.reqs[name]
def check_other(self):
"""
If the requirement is frozen somewhere other than pypi or github, skip.
If you have a private pypi or use --extra-index-url, consider contributing
support here.
"""
if self.reqs:
self.stdout.write(self.style.ERROR("\nOnly pypi and github based requirements are supported:"))
for name, req in self.reqs.items():
if "dist" in req:
pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
elif "url" in req:
pkg_info = "{url}".format(url=req["url"])
else:
pkg_info = "unknown package"
self.stdout.write(self.style.BOLD("{pkg_info:40} is not a pypi or github requirement".format(pkg_info=pkg_info)))
|
"""Handles mapping between color names and ANSI codes and determining auto color codes."""
import sys
from collections import Mapping
BASE_CODES = {
'/all': 0, 'b': 1, 'f': 2, 'i': 3, 'u': 4, 'flash': 5, 'outline': 6, 'negative': 7, 'invis': 8, 'strike': 9,
'/b': 22, '/f': 22, '/i': 23, '/u': 24, '/flash': 25, '/outline': 26, '/negative': 27, '/invis': 28,
'/strike': 29, '/fg': 39, '/bg': 49,
'black': 30, 'red': 31, 'green': 32, 'yellow': 33, 'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37,
'bgblack': 40, 'bgred': 41, 'bggreen': 42, 'bgyellow': 43, 'bgblue': 44, 'bgmagenta': 45, 'bgcyan': 46,
'bgwhite': 47,
'hiblack': 90, 'hired': 91, 'higreen': 92, 'hiyellow': 93, 'hiblue': 94, 'himagenta': 95, 'hicyan': 96,
'hiwhite': 97,
'hibgblack': 100, 'hibgred': 101, 'hibggreen': 102, 'hibgyellow': 103, 'hibgblue': 104, 'hibgmagenta': 105,
'hibgcyan': 106, 'hibgwhite': 107,
'autored': None, 'autoblack': None, 'automagenta': None, 'autowhite': None, 'autoblue': None, 'autoyellow': None,
'autogreen': None, 'autocyan': None,
'autobgred': None, 'autobgblack': None, 'autobgmagenta': None, 'autobgwhite': None, 'autobgblue': None,
'autobgyellow': None, 'autobggreen': None, 'autobgcyan': None,
'/black': 39, '/red': 39, '/green': 39, '/yellow': 39, '/blue': 39, '/magenta': 39, '/cyan': 39, '/white': 39,
'/hiblack': 39, '/hired': 39, '/higreen': 39, '/hiyellow': 39, '/hiblue': 39, '/himagenta': 39, '/hicyan': 39,
'/hiwhite': 39,
'/bgblack': 49, '/bgred': 49, '/bggreen': 49, '/bgyellow': 49, '/bgblue': 49, '/bgmagenta': 49, '/bgcyan': 49,
'/bgwhite': 49, '/hibgblack': 49, '/hibgred': 49, '/hibggreen': 49, '/hibgyellow': 49, '/hibgblue': 49,
'/hibgmagenta': 49, '/hibgcyan': 49, '/hibgwhite': 49,
'/autored': 39, '/autoblack': 39, '/automagenta': 39, '/autowhite': 39, '/autoblue': 39, '/autoyellow': 39,
'/autogreen': 39, '/autocyan': 39,
'/autobgred': 49, '/autobgblack': 49, '/autobgmagenta': 49, '/autobgwhite': 49, '/autobgblue': 49,
'/autobgyellow': 49, '/autobggreen': 49, '/autobgcyan': 49,
}
class ANSICodeMapping(Mapping):
    """Read-only dictionary, resolves closing tags and automatic colors. Iterates only used color tags.

    :cvar bool DISABLE_COLORS: Disable colors (strip color codes).
    :cvar bool LIGHT_BACKGROUND: Use low intensity color codes.
    """

    # Evaluated once at import time: colors default to off when neither stream is a TTY.
    DISABLE_COLORS = not (sys.stdout.isatty() or sys.stderr.isatty())  # Disable colors when piped to another program.
    LIGHT_BACKGROUND = False

    def __init__(self, value_markup):
        """Constructor.

        :param str value_markup: String with {color} tags.
        """
        # Only the tags actually present in the markup are exposed by this mapping.
        self.whitelist = [k for k in BASE_CODES if '{' + k + '}' in value_markup]

    def __getitem__(self, item):
        """Return value for key or None if colors are disabled.

        :param str item: Key.

        :return: Color code integer.
        :rtype: int
        """
        if item not in self.whitelist:
            raise KeyError(item)
        if self.DISABLE_COLORS:
            return None
        # 'auto*' tags resolve through the properties below; everything else
        # falls back to the static BASE_CODES value.
        return getattr(self, item, BASE_CODES[item])

    def __iter__(self):
        """Iterate dictionary."""
        return iter(self.whitelist)

    def __len__(self):
        """Dictionary length."""
        return len(self.whitelist)

    @classmethod
    def disable_all_colors(cls):
        """Disable all colors. Strips any color tags or codes."""
        cls.DISABLE_COLORS = True

    @classmethod
    def enable_all_colors(cls):
        """Enable all colors. Color tags are resolved to ANSI codes again."""
        cls.DISABLE_COLORS = False

    @classmethod
    def set_dark_background(cls):
        """Choose high-intensity ('hi') colors for all 'auto'-prefixed codes, for readability on dark backgrounds."""
        cls.LIGHT_BACKGROUND = False

    @classmethod
    def set_light_background(cls):
        """Choose dark colors for all 'auto'-prefixed codes for readability on light backgrounds."""
        cls.LIGHT_BACKGROUND = True

    @property
    def autoblack(self):
        """Return automatic black foreground color depending on background color."""
        return BASE_CODES['black' if ANSICodeMapping.LIGHT_BACKGROUND else 'hiblack']

    @property
    def autored(self):
        """Return automatic red foreground color depending on background color."""
        return BASE_CODES['red' if ANSICodeMapping.LIGHT_BACKGROUND else 'hired']

    @property
    def autogreen(self):
        """Return automatic green foreground color depending on background color."""
        return BASE_CODES['green' if ANSICodeMapping.LIGHT_BACKGROUND else 'higreen']

    @property
    def autoyellow(self):
        """Return automatic yellow foreground color depending on background color."""
        return BASE_CODES['yellow' if ANSICodeMapping.LIGHT_BACKGROUND else 'hiyellow']

    @property
    def autoblue(self):
        """Return automatic blue foreground color depending on background color."""
        return BASE_CODES['blue' if ANSICodeMapping.LIGHT_BACKGROUND else 'hiblue']

    @property
    def automagenta(self):
        """Return automatic magenta foreground color depending on background color."""
        return BASE_CODES['magenta' if ANSICodeMapping.LIGHT_BACKGROUND else 'himagenta']

    @property
    def autocyan(self):
        """Return automatic cyan foreground color depending on background color."""
        return BASE_CODES['cyan' if ANSICodeMapping.LIGHT_BACKGROUND else 'hicyan']

    @property
    def autowhite(self):
        """Return automatic white foreground color depending on background color."""
        return BASE_CODES['white' if ANSICodeMapping.LIGHT_BACKGROUND else 'hiwhite']

    @property
    def autobgblack(self):
        """Return automatic black background color depending on background color."""
        return BASE_CODES['bgblack' if ANSICodeMapping.LIGHT_BACKGROUND else 'hibgblack']

    @property
    def autobgred(self):
        """Return automatic red background color depending on background color."""
        return BASE_CODES['bgred' if ANSICodeMapping.LIGHT_BACKGROUND else 'hibgred']

    @property
    def autobggreen(self):
        """Return automatic green background color depending on background color."""
        return BASE_CODES['bggreen' if ANSICodeMapping.LIGHT_BACKGROUND else 'hibggreen']

    @property
    def autobgyellow(self):
        """Return automatic yellow background color depending on background color."""
        return BASE_CODES['bgyellow' if ANSICodeMapping.LIGHT_BACKGROUND else 'hibgyellow']

    @property
    def autobgblue(self):
        """Return automatic blue background color depending on background color."""
        return BASE_CODES['bgblue' if ANSICodeMapping.LIGHT_BACKGROUND else 'hibgblue']

    @property
    def autobgmagenta(self):
        """Return automatic magenta background color depending on background color."""
        return BASE_CODES['bgmagenta' if ANSICodeMapping.LIGHT_BACKGROUND else 'hibgmagenta']

    @property
    def autobgcyan(self):
        """Return automatic cyan background color depending on background color."""
        return BASE_CODES['bgcyan' if ANSICodeMapping.LIGHT_BACKGROUND else 'hibgcyan']

    @property
    def autobgwhite(self):
        """Return automatic white background color depending on background color."""
        return BASE_CODES['bgwhite' if ANSICodeMapping.LIGHT_BACKGROUND else 'hibgwhite']
def list_tags():
    """List the available tags.

    :return: List of 4-item tuples: opening tag, closing tag, main ansi value, closing ansi value.
    :rtype: list
    """
    # Build reverse dictionary. Keys are closing tags, values are [closing ansi, opening tag, opening ansi].
    reverse_dict = dict()
    for tag, ansi in sorted(BASE_CODES.items()):
        if tag.startswith('/'):
            reverse_dict[tag] = [ansi, None, None]
        else:
            # Assumes every opening tag has a '/'-prefixed closing entry, which
            # the sorted() order guarantees is created first ('/' sorts before letters).
            reverse_dict['/' + tag][1:] = [tag, ansi]
    # Collapse
    four_item_tuples = [(v[1], k, v[2], v[0]) for k, v in reverse_dict.items()]
    # Sort.
    def sorter(four_item):
        """Sort /all /fg /bg first, then b i u flash, then auto colors, then dark colors, finally light colors.

        :param iter four_item: [opening tag, closing tag, main ansi value, closing ansi value]

        :return: Sorting weight.
        :rtype: int
        """
        if not four_item[2]:  # /all /fg /bg
            return four_item[3] - 200
        if four_item[2] < 10 or four_item[0].startswith('auto'):  # b f i u or auto colors
            return four_item[2] - 100
        return four_item[2]
    four_item_tuples.sort(key=sorter)
    return four_item_tuples
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import logging
import os
import contextlib
import torch
from fairseq import metrics, options
from fairseq.data import (
Dictionary,
LanguagePairDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.tasks.translation import load_langpair_dataset
from . import FairseqTask, register_task
from fairseq import utils
logger = logging.getLogger(__name__)
def _lang_token(lang: str):
return '__{}__'.format(lang)
def _lang_token_index(dic: Dictionary, lang: str):
    """Return the index of the language token for *lang* in dictionary *dic*.

    Fails with AssertionError when the token was never added to *dic*
    (it would otherwise silently resolve to the unknown-word index).
    """
    idx = dic.index(_lang_token(lang))
    assert idx != dic.unk_index, \
        'cannot find language token for lang {}'.format(lang)
    return idx
@register_task('multilingual_translation')
class MultilingualTranslationTask(FairseqTask):
    """A task for training multiple translation models simultaneously.

    We iterate round-robin over batches from multiple language pairs, ordered
    according to the `--lang-pairs` argument.

    The training loop is roughly:

        for i in range(len(epoch)):
            for lang_pair in args.lang_pairs:
                batch = next_batch_for_lang_pair(lang_pair)
                loss = criterion(model_for_lang_pair(lang_pair), batch)
                loss.backward()
            optimizer.step()

    In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
    (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
    implements the `FairseqMultiModel` interface.

    During inference it is required to specify a single `--source-lang` and
    `--target-lang`, which indicates the inference langauge direction.
    `--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to
    the same value as training.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', metavar='DIR', help='path to data directory')
        parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
                            help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language (only needed for inference)')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language (only needed for inference)')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left (default: True)')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left (default: False)')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'],
                            metavar='SRCTGT',
                            help='replace beginning-of-sentence in source sentence with source or target '
                                 'language token. (src/tgt)')
        parser.add_argument('--decoder-langtok', action='store_true',
                            help='replace beginning-of-sentence in target sentence with target language token')
        # fmt: on

    def __init__(self, args, dicts, training):
        """Initialize the task.

        :param args: parsed command-line namespace.
        :param dicts: OrderedDict mapping language -> Dictionary.
        :param training: True when training (all lang pairs), False for inference.
        """
        super().__init__(args)
        self.dicts = dicts
        self.training = training
        if training:
            self.lang_pairs = args.lang_pairs
        else:
            # Inference is restricted to a single direction.
            self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        # eval_lang_pairs for multilingual translation is usually all of the
        # lang_pairs. However for other multitask settings or when we want to
        # optimize for certain languages we want to use a different subset. Thus
        # the eval_lang_pairs class variable is provided for classes that extend
        # this class.
        self.eval_lang_pairs = self.lang_pairs
        # model_lang_pairs will be used to build encoder-decoder model pairs in
        # models.build_model(). This allows multitask type of sub-class can
        # build models other than the input lang_pairs
        self.model_lang_pairs = self.lang_pairs
        self.langs = list(dicts.keys())

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Create the task after parsing args and loading dictionaries."""
        dicts, training = cls.prepare(args, **kwargs)
        return cls(args, dicts, training)

    @classmethod
    def prepare(cls, args, **kargs):
        """Normalize args, decide train vs. inference mode and load all dictionaries."""
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        if args.lang_pairs is None:
            raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
        if isinstance(args.lang_pairs, str):
            args.lang_pairs = args.lang_pairs.split(',')
        sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
        # Specifying an explicit direction implies inference mode.
        if args.source_lang is not None or args.target_lang is not None:
            training = False
        else:
            training = True
        # load dictionaries
        dicts = OrderedDict()
        for lang in sorted_langs:
            paths = utils.split_paths(args.data)
            assert len(paths) > 0
            dicts[lang] = Dictionary.load(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
            if len(dicts) > 0:
                # All dictionaries must agree on the special-symbol indices.
                assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
                assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
                assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
            if args.encoder_langtok is not None or args.decoder_langtok:
                # Register every language token (e.g. '__en__') in each dictionary.
                for lang_to_add in sorted_langs:
                    dicts[lang].add_symbol(_lang_token(lang_to_add))
            logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
        return dicts, training

    def get_encoder_langtok(self, src_lang, tgt_lang):
        """Return the token index used as source EOS replacement (or plain eos when disabled)."""
        if self.args.encoder_langtok is None:
            return self.dicts[src_lang].eos()
        if self.args.encoder_langtok == 'src':
            return _lang_token_index(self.dicts[src_lang], src_lang)
        else:
            return _lang_token_index(self.dicts[src_lang], tgt_lang)

    def get_decoder_langtok(self, tgt_lang):
        """Return the token index used as target BOS replacement (or plain eos when disabled)."""
        if not self.args.decoder_langtok:
            return self.dicts[tgt_lang].eos()
        return _lang_token_index(self.dicts[tgt_lang], tgt_lang)

    def alter_dataset_langtok(self, lang_pair_dataset,
                              src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None):
        """Wrap *lang_pair_dataset* so language tokens replace eos/bos where configured."""
        if self.args.encoder_langtok is None and not self.args.decoder_langtok:
            return lang_pair_dataset

        new_src_eos = None
        if self.args.encoder_langtok is not None and src_eos is not None \
           and src_lang is not None and tgt_lang is not None:
            new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
        else:
            # Passing src_eos=None tells the wrapper to leave the source untouched.
            src_eos = None

        new_tgt_bos = None
        if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None:
            new_tgt_bos = self.get_decoder_langtok(tgt_lang)
        else:
            tgt_eos = None

        return TransformEosLangPairDataset(
            lang_pair_dataset,
            src_eos=src_eos,
            new_src_eos=new_src_eos,
            tgt_bos=tgt_eos,
            new_tgt_bos=new_tgt_bos,
        )

    def load_dataset(self, split, epoch=1, **kwargs):
        """Load a dataset split."""
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        # Shard selection round-robins over data paths by epoch.
        data_path = paths[(epoch - 1) % len(paths)]

        def language_pair_dataset(lang_pair):
            # Build one LanguagePairDataset (plus langtok wrapping) per direction.
            src, tgt = lang_pair.split('-')
            langpair_dataset = load_langpair_dataset(
                data_path, split, src, self.dicts[src], tgt, self.dicts[tgt],
                combine=True, dataset_impl=self.args.dataset_impl,
                upsample_primary=self.args.upsample_primary,
                left_pad_source=self.args.left_pad_source,
                left_pad_target=self.args.left_pad_target,
                max_source_positions=self.args.max_source_positions,
                max_target_positions=self.args.max_target_positions,
            )
            return self.alter_dataset_langtok(
                langpair_dataset,
                src_eos=self.dicts[src].eos(),
                src_lang=src,
                tgt_eos=self.dicts[tgt].eos(),
                tgt_lang=tgt,
            )

        self.datasets[split] = RoundRobinZipDatasets(
            OrderedDict([
                (lang_pair, language_pair_dataset(lang_pair))
                for lang_pair in self.lang_pairs
            ]),
            eval_key=None if self.training else "%s-%s" % (self.args.source_lang, self.args.target_lang),
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths):
        """Build a single-direction dataset for generation from raw tokens/lengths."""
        lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang)
        return RoundRobinZipDatasets(
            OrderedDict([(
                lang_pair,
                self.alter_dataset_langtok(
                    LanguagePairDataset(
                        src_tokens, src_lengths,
                        self.source_dictionary
                    ),
                    src_eos=self.source_dictionary.eos(),
                    src_lang=self.args.source_lang,
                    tgt_eos=self.target_dictionary.eos(),
                    tgt_lang=self.args.target_lang,
                ),
            )]),
            eval_key=lang_pair,
        )

    def build_model(self, args):
        """Build the multi-model, verifying task and model args are consistent."""
        def check_args():
            messages = []
            if len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0:
                messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs))
            if self.args.encoder_langtok != args.encoder_langtok:
                messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok))
            if self.args.decoder_langtok != args.decoder_langtok:
                messages.append('--decoder-langtok should {} be set.'.format("" if args.decoder_langtok else "not"))

            if len(messages) > 0:
                raise ValueError(' '.join(messages))

        # Check if task args are consistant with model args
        check_args()

        from fairseq import models
        model = models.build_model(args, self)
        if not isinstance(model, FairseqMultiModel):
            raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture')
        return model

    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """Run one training step over every language pair present in *sample*."""
        model.train()
        from collections import defaultdict
        agg_loss, agg_sample_size, agg_logging_output = 0., 0., defaultdict(float)
        curr_lang_pairs = [
            lang_pair
            for lang_pair in self.model_lang_pairs
            if sample[lang_pair] is not None and len(sample[lang_pair]) != 0
        ]

        for idx, lang_pair in enumerate(curr_lang_pairs):
            def maybe_no_sync():
                # Skip gradient all-reduce for all but the last pair so gradients
                # are synchronized only once per step under DDP.
                if (
                    self.args.distributed_world_size > 1
                    and hasattr(model, 'no_sync')
                    and idx < len(curr_lang_pairs) - 1
                ):
                    return model.no_sync()
                else:
                    return contextlib.ExitStack()  # dummy contextmanager
            with maybe_no_sync():
                loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
                if ignore_grad:
                    loss *= 0
                optimizer.backward(loss)
            agg_loss += loss.detach().item()
            # TODO make summing of the sample sizes configurable
            agg_sample_size += sample_size
            for k in logging_output:
                agg_logging_output[k] += logging_output[k]
                agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
        return agg_loss, agg_sample_size, agg_logging_output

    def valid_step(self, sample, model, criterion):
        """Run one validation step, aggregating losses across eval language pairs."""
        model.eval()
        with torch.no_grad():
            from collections import defaultdict
            agg_loss, agg_sample_size, agg_logging_output = 0., 0., defaultdict(float)
            for lang_pair in self.eval_lang_pairs:
                if lang_pair not in sample or sample[lang_pair] is None or len(sample[lang_pair]) == 0:
                    continue
                loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
                agg_loss += loss.data.item()
                # TODO make summing of the sample sizes configurable
                agg_sample_size += sample_size
                for k in logging_output:
                    agg_logging_output[k] += logging_output[k]
                    agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
        return agg_loss, agg_sample_size, agg_logging_output

    def inference_step(self, generator, models, sample, prefix_tokens=None):
        """Generate translations, seeding the decoder with the language token when enabled."""
        with torch.no_grad():
            return generator.generate(
                models,
                sample,
                prefix_tokens=prefix_tokens,
                bos_token=_lang_token_index(self.target_dictionary, self.args.target_lang)
                if self.args.decoder_langtok else self.target_dictionary.eos(),
            )

    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate logging outputs across workers into scalar metrics."""
        with metrics.aggregate():
            # pass 'sample_size', 'nsentences', 'ntokens' stats to fairseq_task
            super().reduce_metrics(logging_outputs, criterion)
            for k in ['sample_size', 'nsentences', 'ntokens']:
                metrics.log_scalar(k, sum(l[k] for l in logging_outputs))

    @property
    def source_dictionary(self):
        # During training all dictionaries share special symbols, so any one works.
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.source_lang]

    @property
    def target_dictionary(self):
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.target_lang]

    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        if len(self.datasets.values()) == 0:
            return {'%s-%s' % (self.args.source_lang, self.args.target_lang):
                    (self.args.max_source_positions, self.args.max_target_positions)}
        return OrderedDict([
            (key, (self.args.max_source_positions, self.args.max_target_positions))
            for split in self.datasets.keys()
            for key in self.datasets[split].datasets.keys()
        ])
|
class FieldManifest:
    """Declarative description of how one field is extracted from a document.

    A manifest names the field, the query used to locate its element, the
    attribute to read from that element, and optional nested child selectors
    (which are themselves FieldManifest instances).
    """

    def __init__(self,
                 field_id=None,
                 data_type="RawField",
                 element_query=None,
                 data_attribute=None,
                 child_selectors=None,
                 flatten_data=False
                 ):
        self.field_id = field_id
        self.data_type = data_type
        self.element_query = element_query
        self.data_attribute = data_attribute
        # flatten_data is only meaningful when a DictField is read from the
        # whole element; any other combination is rejected up front.
        flatten_allowed = data_attribute == "element" and data_type == "DictField"
        if flatten_data is True and not flatten_allowed:
            raise Exception(
                "error on `{field_id}` field manifest : flatten_data=True, can only be used with "
                "data_attribute=='element' and data_type=='DictField'".format(field_id=field_id)
            )
        self.flatten_data = flatten_data
        # Recursively wrap nested selector dicts in FieldManifest objects.
        self.child_selectors = [FieldManifest(**child) for child in (child_selectors or [])]

    def __repr__(self):
        return ("<FieldManifest data_type='{data_type}' "
                "element_query='{element_query}' "
                "data_attribute='{data_attribute}' "
                ">").format(data_type=self.data_type, element_query=self.element_query,
                            data_attribute=self.data_attribute)

    def to_dict(self):
        """Serialize the scalar attributes (child selectors and flatten flag are omitted)."""
        return {
            "field_id": self.field_id,
            "element_query": self.element_query,
            "data_type": self.data_type,
            "data_attribute": self.data_attribute
        }
|
# Checking FK and IK to make sure the code is right
from math import cos, sin, atan2, sqrt, acos, pi
# Forward Kinematics
def FKin(q1, q2, q3):
    """q1 limit = -1.13 to 1.57, q2 limit = -2.64 to 2.55, q3 limit = -1.78 to 1.78"""
    # Link lengths (same units as the returned x/y).
    link1, link2, link3 = 0.155, 0.135, 0.218
    phi = q1 + q2 + q3
    print(phi)
    # Wrap the end-effector orientation into [-pi, pi] (single correction).
    if phi > pi:
        phi -= 2 * pi
    elif phi < -pi:
        phi += 2 * pi
    x = link1 * cos(q1) + link2 * cos(q1 + q2) + link3 * cos(phi)
    y = link1 * sin(q1) + link2 * sin(q1 + q2) + link3 * sin(phi)
    print("FK result for {}, {}, {} is {}, {}, {}".format(q1, q2, q3, x, y, phi))
    return [x, y, phi]
def IKin(x, y, phi):
    """Inverse kinematics for the planar 3R arm: recover [q1, q2, q3] reaching pose (x, y, phi)."""
    link1, link2, link3 = 0.155, 0.135, 0.218

    def wrap(angle):
        # Single-step wrap into [-pi, pi], mirroring FKin's correction.
        if angle > pi:
            return angle - 2 * pi
        if angle < -pi:
            return angle + 2 * pi
        return angle

    # Wrist position: remove the last link from the end-effector pose.
    wrist_x = x - link3 * cos(phi)
    wrist_y = y - link3 * sin(phi)
    reach = sqrt(wrist_x ** 2 + wrist_y ** 2)
    gamma = atan2(-wrist_y / reach, -wrist_x / reach)
    elbow = acos(-(wrist_x ** 2 + wrist_y ** 2 + link1 ** 2 - link2 ** 2) / (2 * link1 * reach))
    # Two elbow configurations exist; keep the smaller base angle.
    q1 = min(gamma + elbow, gamma - elbow)
    q2 = atan2((wrist_y - link1 * sin(q1)) / link2, (wrist_x - link1 * cos(q1)) / link2) - q1
    q3 = phi - q1 - q2
    q1 = wrap(q1)
    q2 = wrap(q2)
    q3 = wrap(q3)
    print("IK result for {}, {}, {}, is {}, {}, {}".format(x, y, phi, q1, q2, q3))
    return [q1, q2, q3]
#test
# q11 = -1.0
# q12 = -0.6
# q13 = -0.75
# q1 limit = -1.13 to 1.57, q2 limit = -2.64 to 2.55, q3 limit = -1.78 to 1.78
# Round-trip sanity checks: compute a pose via FK, then feed it back through IK
# (IK may return a different but equivalent joint solution).
[x2, y2, phi2] = FKin(-1.0,-0.6,-1.25) #box
IKin(x2, y2, phi2)
[x1, y1, phi1] = FKin(-0.7, -1.3, -0.75) #trolley
IKin(x1, y1, phi1)
|
"""
Zorp tests
"""
|
# Print an n-by-n grid: letters A, B, C, ... on and below the diagonal,
# '*' above it. Each cell is followed by a space.
n = 5
for row in range(n):
    for col in range(n):
        # chr(col + 65) maps 0 -> 'A', 1 -> 'B', ... (65 is the ASCII code of 'A').
        symbol = "*" if col > row else chr(col + 65)
        print(symbol, end=" ")
    print()
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import logicmonitor_sdk
from logicmonitor_sdk.models.jdbc_collector_attribute import JDBCCollectorAttribute # noqa: E501
from logicmonitor_sdk.rest import ApiException
class TestJDBCCollectorAttribute(unittest.TestCase):
    """JDBCCollectorAttribute unit test stubs (generated by swagger-codegen)."""

    def setUp(self):
        """No fixtures required yet."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testJDBCCollectorAttribute(self):
        """Test JDBCCollectorAttribute"""
        # FIXME: construct object with mandatory attributes with example values
        # model = logicmonitor_sdk.models.jdbc_collector_attribute.JDBCCollectorAttribute()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
try:
from .code import var_2_cool_json
except ImportError:
from code import var_2_cool_json
import unittest
class FullTest(unittest.TestCase):
    """Smoke tests for var_2_cool_json."""

    def test_1(self):
        """
        Basic test that this thing just works :)

        :return:
        """
        test_var = {
            'a' : 1,
            'b' : 2
        }
        # A two-key dict should serialize to more than 10 characters of output.
        self.assertGreater( len(var_2_cool_json(test_var)), 10 )
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
"""
WiderFace evaluation code
author: wondervictor
mail: tianhengcheng@gmail.com
copyright@wondervictor
MIT License
Copyright (c) 2018 Vic Chan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
import os
import pickle
import argparse
import numpy as np
from scipy.io import loadmat
from bbox import bbox_overlaps
def get_gt_boxes(gt_dir):
    """ gt dir: (wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat, wider_hard_val.mat)

    Loads the WIDER FACE validation ground truth plus the easy/medium/hard
    subset index lists from MATLAB .mat files in *gt_dir*.
    """
    gt_mat = loadmat(os.path.join(gt_dir, 'val.mat'))  # you own ground_truth name
    hard_mat = loadmat(os.path.join(gt_dir, 'wider_hard_val.mat'))
    medium_mat = loadmat(os.path.join(gt_dir, 'wider_medium_val.mat'))
    easy_mat = loadmat(os.path.join(gt_dir, 'wider_easy_val.mat'))

    # Per-event lists of face boxes, event names and file names.
    facebox_list = gt_mat['face_bbx_list']
    event_list = gt_mat['event_list']
    file_list = gt_mat['file_list']

    # Subset membership lists for the three difficulty settings.
    hard_gt_list = hard_mat['gt_list']
    medium_gt_list = medium_mat['gt_list']
    easy_gt_list = easy_mat['gt_list']

    return facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list
def get_gt_boxes_from_txt(gt_path, cache_dir):
    """Parse ground-truth boxes from a WIDER-style txt annotation file.

    The file alternates: an image-name line (contains ``'--'``), a box-count
    line, then one ``x y w h ...`` line per box.  Parsed results are cached as
    a pickle in *cache_dir* and reloaded from there on subsequent calls.

    Returns:
        dict: image name line -> float32 ndarray of shape (n_boxes, 4).
    """
    cache_file = os.path.join(cache_dir, 'gt_cache.pkl')
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as f:
            return pickle.load(f)

    with open(gt_path, 'r') as f:
        lines = [line.rstrip('\r\n') for line in f]

    boxes = {}
    current_boxes = []
    current_name = None
    # state 0: before first image; 1: expecting box-count line; 2: reading boxes
    state = 0
    for line in lines:
        if state == 0 and '--' in line:
            state = 1
            current_name = line
            continue
        if state == 1:
            # Skip the box-count line.
            state = 2
            continue
        if state == 2 and '--' in line:
            # New image starts: flush boxes collected for the previous image.
            state = 1
            boxes[current_name] = np.array(current_boxes).astype('float32')
            current_name = line
            current_boxes = []
            continue
        if state == 2:
            box = [float(x) for x in line.split(' ')[:4]]
            current_boxes.append(box)
            continue

    # BUG FIX: the original never flushed the final image after the loop,
    # silently dropping the last entry of the annotation file.
    if current_name is not None:
        boxes[current_name] = np.array(current_boxes).astype('float32')

    with open(cache_file, 'wb') as f:
        pickle.dump(boxes, f)
    return boxes
def read_pred_file(filepath):
    """Read one detection txt file.

    Line 0 is the image path, line 1 the detection count; each remaining
    line holds one detection as whitespace-separated floats (x y w h score).

    Returns:
        tuple: (image basename, float ndarray of shape (N, 5)).
    """
    with open(filepath, 'r') as f:
        raw = f.readlines()
    img_file = raw[0].rstrip('\n\r')
    rows = [[float(tok) for tok in line.rstrip('\r\n').split(' ')]
            for line in raw[2:]]
    boxes = np.array(rows).astype('float')
    return img_file.split('/')[-1], boxes
def get_preds(pred_dir):
    """Collect all predictions under *pred_dir* (one sub-directory per event).

    Returns:
        dict: event name -> {image name without '.jpg' -> (N, 5) boxes array}.
    """
    boxes = dict()
    for event in os.listdir(pred_dir):
        event_dir = os.path.join(pred_dir, event)
        current_event = dict()
        for imgtxt in os.listdir(event_dir):
            imgname, box = read_pred_file(os.path.join(event_dir, imgtxt))
            # BUG FIX: rstrip('.jpg') strips ANY trailing '.', 'j', 'p', 'g'
            # characters (e.g. 'img_g.jpg' -> 'img_'), producing keys that do
            # not match the ground-truth names.  Remove only the suffix.
            if imgname.endswith('.jpg'):
                imgname = imgname[:-len('.jpg')]
            current_event[imgname] = box
        boxes[event] = current_event
    return boxes
def norm_score(pred_norm):
    """Min-max normalize detection scores across the whole prediction set.

    pred_norm: {event: {image: ndarray [[x1, y1, x2, y2, s], ...]}}
    The score column (last) is rescaled in place to [0, 1]; empty arrays
    are skipped.
    """
    max_score = 0
    min_score = 1
    for _, k in pred_norm.items():
        for _, v in k.items():
            if v.size == 0:
                continue
            min_v = np.min(v[:, -1])
            max_v = np.max(v[:, -1])
            max_score = max(max_v, max_score)
            min_score = min(min_v, min_score)
    diff = max_score - min_score
    # BUG FIX: when every score is identical (diff == 0) the original divided
    # by zero and filled the score column with NaN/inf; leave scores as-is.
    if diff == 0:
        return
    for _, k in pred_norm.items():
        for _, v in k.items():
            if v.size == 0:
                continue
            v[:, -1] = (v[:, -1] - min_score)/diff
def image_eval(pred_eval, gt, ignore, iou_thresh):
    """Evaluate the detections of a single image against its ground truth.

    pred_eval: (N, 5) array of [x, y, w, h, score] detections.
        NOTE(review): the cumulative-recall computation assumes detections are
        ordered by descending score -- confirm against the prediction files.
    gt: (M, 4) array of [x, y, w, h] ground-truth boxes.
    ignore: length-M array; 1 keeps the gt box for scoring, 0 ignores it.
    iou_thresh: IoU threshold for a detection to match a gt box.

    Returns:
        pred_recall: length-N array; pred_recall[h] = number of gt boxes
            recalled by the first h+1 detections.
        proposal_list: length-N array; 1 if detection h counts as a proposal,
            -1 if its best match is an ignored gt box.
    """
    pred_t = pred_eval.copy()
    gt_t = gt.copy()
    pred_recall = np.zeros(pred_t.shape[0])
    recall_list = np.zeros(gt_t.shape[0])
    proposal_list = np.ones(pred_t.shape[0])
    # Convert [x, y, w, h] to [x1, y1, x2, y2] corners for the IoU computation.
    pred_t[:, 2] = pred_t[:, 2] + pred_t[:, 0]
    pred_t[:, 3] = pred_t[:, 3] + pred_t[:, 1]
    gt_t[:, 2] = gt_t[:, 2] + gt_t[:, 0]
    gt_t[:, 3] = gt_t[:, 3] + gt_t[:, 1]
    overlaps = bbox_overlaps(pred_t[:, :4], gt_t)
    for h in range(pred_t.shape[0]):
        gt_overlap = overlaps[h]
        max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax()
        if max_overlap >= iou_thresh:
            if ignore[max_idx] == 0:
                # Best match is an ignored gt box: drop both from scoring.
                recall_list[max_idx] = -1
                proposal_list[h] = -1
            elif recall_list[max_idx] == 0:
                # First detection to claim this gt box.
                recall_list[max_idx] = 1
        r_keep_index = np.where(recall_list == 1)[0]
        pred_recall[h] = len(r_keep_index)
    return pred_recall, proposal_list
def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall):
    """Accumulate per-image (proposal count, recalled count) pairs over
    *thresh_num* evenly spaced score thresholds in (0, 1]."""
    pr_info = np.zeros((thresh_num, 2)).astype('float')
    scores = pred_info[:, 4]
    for step in range(thresh_num):
        threshold = 1 - (step + 1) / thresh_num
        above = np.where(scores >= threshold)[0]
        if above.size == 0:
            continue  # row stays at its zero initialization
        cutoff = above[-1]
        kept = np.where(proposal_list[:cutoff + 1] == 1)[0]
        pr_info[step, 0] = len(kept)
        pr_info[step, 1] = pred_recall[cutoff]
    return pr_info
def dataset_pr_info(thresh_num, pr_curve, count_face):
    """Turn accumulated (proposals, recalled) counts into (precision, recall)."""
    pr_curve_t = np.zeros((thresh_num, 2))
    # precision = recalled / proposals; recall = recalled / total faces
    pr_curve_t[:, 0] = pr_curve[:, 1] / pr_curve[:, 0]
    pr_curve_t[:, 1] = pr_curve[:, 1] / count_face
    return pr_curve_t
def voc_ap(rec, prec):
    """Compute Average Precision as the area under the PR curve (VOC-style)."""
    # Sentinel endpoints so the envelope and the recall deltas are well defined.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Precision envelope: make mpre monotonically non-increasing by taking a
    # right-to-left running maximum.
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]
    # AP = sum over recall-change points of (delta recall) * precision.
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
def evaluation(pred_evaluation, gt_path, iou_thresh=0.4):
    """Run WIDER FACE evaluation and print easy/medium/hard AP.

    pred_evaluation: path to the prediction directory (one sub-dir per event).
    gt_path: directory holding the ground-truth .mat files (see get_gt_boxes).
    iou_thresh: IoU threshold used when matching detections to gt boxes.
    """
    print_pred = pred_evaluation  # keep the path for the results banner below
    pred_evaluation = get_preds(pred_evaluation)
    norm_score(pred_evaluation)
    facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(gt_path)
    event_num = len(event_list)
    thresh_num = 1000
    setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list]
    aps = []
    for setting_id in range(3):
        # different setting
        gt_list = setting_gts[setting_id]
        count_face = 0
        pr_curve = np.zeros((thresh_num, 2)).astype('float')
        # [hard, medium, easy]
        pbar = range(event_num)
        error_count = 0  # images present in gt but missing from predictions
        for i in pbar:
            event_name = str(event_list[i][0][0])
            img_list = file_list[i][0]
            pred_list = pred_evaluation[event_name]
            sub_gt_list = gt_list[i][0]
            gt_bbx_list = facebox_list[i][0]
            for j, _ in enumerate(img_list):
                try:
                    pred_info = pred_list[str(img_list[j][0][0])]
                except KeyError:
                    error_count += 1
                    continue
                gt_boxes = gt_bbx_list[j][0].astype('float')
                keep_index = sub_gt_list[j][0]
                count_face += len(keep_index)
                if gt_boxes.size == 0 or pred_info.size == 0:
                    continue
                ignore = np.zeros(gt_boxes.shape[0])
                if keep_index.size != 0:
                    # keep_index comes from MATLAB .mat files, presumably
                    # 1-based -- hence the -1 shift to 0-based indices.
                    ignore[keep_index-1] = 1
                pred_recall, proposal_list = image_eval(pred_info, gt_boxes, ignore, iou_thresh)
                pr_curve += img_pr_info(thresh_num, pred_info, proposal_list, pred_recall)
        pr_curve = dataset_pr_info(thresh_num, pr_curve, count_face)
        propose = pr_curve[:, 0]
        recall = pr_curve[:, 1]
        ap = voc_ap(recall, propose)
        aps.append(ap)
    print("==================== Results = ====================", print_pred)
    print("Easy Val AP: {}".format(aps[0]))
    print("Medium Val AP: {}".format(aps[1]))
    print("Hard Val AP: {}".format(aps[2]))
    print("=================================================")
if __name__ == '__main__':
    # Command-line entry point: evaluate a prediction directory against the
    # ground-truth .mat files.
    cli = argparse.ArgumentParser()
    cli.add_argument('-p', '--pred', default='',
                     help='test output, txt contain box positions and scores')
    cli.add_argument('-g', '--gt', default='', help='ground truth path, mat format')
    options = cli.parse_args()
    # Silently do nothing when the prediction path is not a directory.
    if os.path.isdir(options.pred):
        evaluation(options.pred, options.gt)
|
"""Get descendant/parent counts for all GO terms in a GODag and broad L0 and L1 terms."""
from __future__ import print_function
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
import collections as cx
from itertools import chain
from goatools.godag.go_tasks import get_go2parents
from goatools.godag.go_tasks import get_go2upper
from goatools.godag.go_tasks import get_go2children
from goatools.godag.go_tasks import get_go2lower
from goatools.gosubdag.go_tasks import get_goobjs_altgo2goobj
from goatools.gosubdag.go_tasks import add_alt_goids
class CountRelativesInit(object):
    """Get descendant/parent counts for all GO terms in a GODag and broad L0 and L1 terms."""

    def __init__(self, go2obj, relationships, dcnt, go2letter):
        # Subset go2obj contains only items needed by go_sources
        self.go2obj = go2obj
        # Relationship types to traverse in addition to is_a (empty -> is_a only).
        self.relationships = relationships
        self.dcnt = dcnt
        # Optional explicit {GO id: letter} mapping; None -> letters auto-assigned.
        self.go2letter = go2letter
        # Ex: set(['part_of', 'regulates', 'negatively_regulates', 'positively_regulates'])
        _goobjs, _altgo2goobj = get_goobjs_altgo2goobj(self.go2obj)
        _r0 = not relationships  # True if not using relationships
        # With relationships, descendants/parents follow relationship edges too.
        self.go2descendants = get_go2children(_goobjs) if _r0 else get_go2lower(_goobjs)
        self.go2parents = get_go2parents(_goobjs) if _r0 else get_go2upper(_goobjs)
        # Descendant count per GO ID.
        self.go2dcnt = {go: len(p) for go, p in self.go2descendants.items()}
        # Make alternate GO IDs resolve to the same data as their primary ID.
        add_alt_goids(self.go2parents, _altgo2goobj)
        add_alt_goids(self.go2descendants, _altgo2goobj)
        add_alt_goids(self.go2dcnt, _altgo2goobj)
        # print('INIT CountRelativesInit', self.relationships)

    def get_relationship_dicts(self):
        """Given GO DAG relationships, return summaries per GO ID."""
        # NOTE(review): despite the docstring, this currently returns None (or
        # nothing) and only prints debug output -- looks unfinished.
        if not self.relationships:
            return None
        for goid, goobj in self.go2obj.items():
            for reltyp, relset in goobj.relationship.items():
                relfwd_goids = set(o.id for o in relset)
                # for relfwd_goid in relfwd_goids:
                #     assert relfwd_goid in self.go2obj, "{GO} {REL} NOT FOUND {GO_R}".format(
                #         GO=goid, REL=reltyp, GO_R=relfwd_goid)
                print("CountRelativesInit RELLLLS", goid, goobj.id, reltyp, relfwd_goids)

    def get_goone2ntletter(self, go2dcnt, depth2goobjs):
        """Assign letters to depth-01 GO terms ordered using descendants cnt."""
        # 1. Group level-01/depth-01 GO terms by namespace
        ns2dcntgoobj = cx.defaultdict(list)
        for goobj in depth2goobjs[1]:
            dcnt = go2dcnt[goobj.id]
            ns2dcntgoobj[goobj.namespace].append((dcnt, goobj))
        # 2. Assign letters to level-01/depth-01 GO terms
        go2nt = {}
        ntobj = cx.namedtuple("NtGoLetters", "D1 dcnt goobj")
        _go2abc = self.go2letter
        # Candidate letter ordinals: A-Z then a-z (converted with chr() below).
        letters = list(chain(range(ord('A'), ord('Z') + 1), range(ord('a'), ord('z') + 1)))
        for list_dcnt_goobj in ns2dcntgoobj.values():
            letter_idx = 0
            # Highest descendant count gets the earliest letter.
            for dcnt, goobj in sorted(list_dcnt_goobj, key=lambda t: t[0], reverse=True):
                letter = chr(letters[letter_idx]) if _go2abc is None else _go2abc.get(goobj.id, '')
                go2nt[goobj.id] = ntobj._make([letter, dcnt, goobj])
                letter_idx += 1
        return go2nt

    @staticmethod
    def get_depth2goobjs(go2obj, max_depth=2):
        """Init depth2goobjs using list sorted by depth, get level-00/01 GO terms."""
        depth2goobjs = {d: list() for d in range(max_depth+1)}
        goid_seen = set()
        for _, goobj in sorted(go2obj.items(), key=lambda t: t[1].depth):
            # Save depth-00, depth-01, depth-02
            if goobj.depth > max_depth:
                # Input is sorted by depth, so no later item can qualify.
                break
            goid = goobj.id
            if not goobj.is_obsolete and goid not in goid_seen:
                depth2goobjs[goobj.depth].append(goobj)
                goid_seen.add(goid)
        return depth2goobjs
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.
|
import os
import sys
import psutil
import tensorflow as tf
import numpy as np
from collections import defaultdict, OrderedDict
from tabulate import tabulate
import tensorpack
from ..compat import tfv1
from ..utils.utils import find_library_full_path as find_library
from ..utils.nvml import NVMLContext
from ..libinfo import __git_version__
def parse_TF_build_info():
    """Return an OrderedDict describing the CUDA/CUDNN versions TF was built with.

    Tries the modern ``build_info.build_info`` dict first, then falls back to
    the legacy module attributes; returns an empty dict if neither exists.
    """
    ret = OrderedDict()
    from tensorflow.python.platform import build_info
    # Modern TF: a single dict of build properties.
    try:
        info = build_info.build_info
        for key, value in list(info.items()):
            if key == "cuda_version":
                ret["TF built with CUDA"] = value
            elif key == "cudnn_version":
                ret["TF built with CUDNN"] = value
            elif key == "cuda_compute_capabilities":
                caps = [c.replace("compute_", "") for c in value]
                ret["TF compute capabilities"] = ",".join(caps)
        return ret
    except AttributeError:
        pass
    # Legacy TF: individual module-level attributes.
    try:
        ret["TF built with CUDA"] = build_info.cuda_version_number
        ret["TF built with CUDNN"] = build_info.cudnn_version_number
    except AttributeError:
        pass
    return ret
def collect_env_info():
    """
    Returns:
        str - a table contains important information about the environment
    """
    # Each entry is a (label, value) row in the final table.
    data = []
    data.append(("sys.platform", sys.platform))
    data.append(("Python", sys.version.replace("\n", "")))
    data.append(("Tensorpack", __git_version__ + " @" + os.path.dirname(tensorpack.__file__)))
    data.append(("Numpy", np.__version__))
    data.append(("TensorFlow", tfv1.VERSION + "/" + tfv1.GIT_VERSION + " @" + os.path.dirname(tf.__file__)))
    data.append(("TF Compiler Version", tfv1.COMPILER_VERSION))
    has_cuda = tf.test.is_built_with_cuda()
    data.append(("TF CUDA support", has_cuda))

    # These private TF APIs may not exist in every version; best-effort only.
    try:
        from tensorflow.python.framework import test_util
        data.append(("TF MKL support", test_util.IsMklEnabled()))
    except Exception:
        pass

    try:
        from tensorflow.python.framework import test_util
        data.append(("TF XLA support", test_util.is_xla_enabled()))
    except Exception:
        pass

    if has_cuda:
        data.append(("Nvidia Driver", find_library("nvidia-ml")))
        data.append(("CUDA libs", find_library("cudart")))
        data.append(("CUDNN libs", find_library("cudnn")))
        for k, v in parse_TF_build_info().items():
            data.append((k, v))
        data.append(("NCCL libs", find_library("nccl")))

        # List devices with NVML
        data.append(
            ("CUDA_VISIBLE_DEVICES",
             os.environ.get("CUDA_VISIBLE_DEVICES", "Unspecified")))
        try:
            # Group device indices by device name, e.g. "GPU 0,1: <name>".
            devs = defaultdict(list)
            with NVMLContext() as ctx:
                for idx, dev in enumerate(ctx.devices()):
                    devs[dev.name()].append(str(idx))
            for devname, devids in devs.items():
                data.append(
                    ("GPU " + ",".join(devids), devname))
        except Exception:
            data.append(("GPU", "Not found with NVML"))

    # Despite the name, this is system RAM (psutil.virtual_memory), not GPU VRAM.
    vram = psutil.virtual_memory()
    data.append(("Free RAM", "{:.2f}/{:.2f} GB".format(vram.available / 1024**3, vram.total / 1024**3)))
    data.append(("CPU Count", psutil.cpu_count()))

    # Other important dependencies:
    try:
        import horovod
        data.append(("Horovod", horovod.__version__ + " @" + os.path.dirname(horovod.__file__)))
    except ImportError:
        pass

    try:
        import cv2
        data.append(("cv2", cv2.__version__))
    except ImportError:
        pass

    import msgpack
    data.append(("msgpack", ".".join([str(x) for x in msgpack.version])))

    has_prctl = True
    try:
        import prctl
        _ = prctl.set_pdeathsig  # noqa
    except Exception:
        has_prctl = False
    data.append(("python-prctl", has_prctl))

    return tabulate(data)
if __name__ == '__main__':
    # Print the environment table, then list GPUs visible to TensorFlow.
    print(collect_env_info())

    print("Detecting GPUs using TensorFlow:")
    try:
        # available since TF 1.14
        gpu_devices = tf.config.experimental.list_physical_devices('GPU')
        gpu_devices = [x.name for x in gpu_devices]
    except AttributeError:
        # Older TF: enumerate devices through the low-level client API.
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        gpu_devices = [x.name for x in local_device_protos if x.device_type == 'GPU']
    print("GPUs:", ", ".join(gpu_devices))
|
"""
The typing module: Support for gradual typing as defined by PEP 484.
At large scale, the structure of the module is following:
* Imports and exports, all public names should be explicitly added to __all__.
* Internal helper functions: these should never be used in code outside this module.
* _SpecialForm and its instances (special forms):
Any, NoReturn, ClassVar, Union, Optional, Concatenate
* Classes whose instances can be type arguments in addition to types:
ForwardRef, TypeVar and ParamSpec
* The core of internal generics API: _GenericAlias and _VariadicGenericAlias, the latter is
currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str],
etc., are instances of either of these classes.
* The public counterpart of the generics API consists of two classes: Generic and Protocol.
* Public helper functions: get_type_hints, overload, cast, no_type_check,
no_type_check_decorator.
* Generic aliases for collections.abc ABCs and few additional protocols.
* Special types: NewType, NamedTuple, TypedDict.
* Wrapper submodules for re and io related types.
"""
from abc import abstractmethod, ABCMeta
import collections
import collections.abc
import contextlib
import functools
import operator
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
import warnings
from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias
try:
from _typing import _idfunc
except ImportError:
def _idfunc(_, x):
return x
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Annotated',
'Any',
'Callable',
'ClassVar',
'Concatenate',
'Final',
'ForwardRef',
'Generic',
'Literal',
'Optional',
'ParamSpec',
'Protocol',
'Tuple',
'Type',
'TypeVar',
'Union',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
'ContextManager',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'Collection',
'AsyncGenerator',
'AsyncContextManager',
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsIndex',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'ChainMap',
'Counter',
'Deque',
'Dict',
'DefaultDict',
'List',
'OrderedDict',
'Set',
'FrozenSet',
'NamedTuple', # Not really a type.
'TypedDict', # Not really a type.
'Generator',
# Other concrete types.
'BinaryIO',
'IO',
'Match',
'Pattern',
'TextIO',
# One-off things.
'AnyStr',
'cast',
'final',
'get_args',
'get_origin',
'get_type_hints',
'is_typeddict',
'NewType',
'no_type_check',
'no_type_check_decorator',
'NoReturn',
'overload',
'ParamSpecArgs',
'ParamSpecKwargs',
'runtime_checkable',
'Text',
'TYPE_CHECKING',
'TypeAlias',
'TypeGuard',
]
# The pseudo-submodules 're' and 'io' are part of the public
# namespace, but excluded from __all__ because they might stomp on
# legitimate imports of those modules.
def _type_convert(arg, module=None):
    """Normalize a type argument: None becomes type(None), strings become ForwardRef."""
    if arg is None:
        return type(None)
    return ForwardRef(arg, module=module) if isinstance(arg, str) else arg
def _type_check(arg, msg, is_argument=True, module=None, *, is_class=False):
    """Check that the argument is a type, and return it (internal helper).

    As a special case, accept None and return type(None) instead. Also wrap strings
    into ForwardRef instances. Consider several corner cases, for example plain
    special forms like Union are not valid, while Union[int, str] is OK, etc.
    The msg argument is a human-readable error message, e.g::

        "Union[arg, ...]: arg should be a type."

    We append the repr() of the actual value (truncated to 100 chars).
    """
    invalid_generic_forms = (Generic, Protocol)
    if not is_class:
        # Bare ClassVar is only legal directly in class bodies.
        invalid_generic_forms += (ClassVar,)
    if is_argument:
        # Bare Final is not valid inside another type's argument list.
        invalid_generic_forms += (Final,)

    arg = _type_convert(arg, module=module)
    if (isinstance(arg, _GenericAlias) and
            arg.__origin__ in invalid_generic_forms):
        raise TypeError(f"{arg} is not valid as type argument")
    if arg in (Any, NoReturn, Final):
        return arg
    if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
        raise TypeError(f"Plain {arg} is not valid as type argument")
    if isinstance(arg, (type, TypeVar, ForwardRef, types.UnionType, ParamSpec)):
        return arg
    if not callable(arg):
        raise TypeError(f"{msg} Got {arg!r:.100}.")
    return arg
def _is_param_expr(arg):
    """Return True if *arg* can stand for a ParamSpec argument list."""
    if arg is ...:
        return True
    return isinstance(arg, (tuple, list, ParamSpec, _ConcatenateGenericAlias))
def _type_repr(obj):
    """Return the repr() of an object, special-casing types (internal helper).

    If obj is a type, we return a shorter version than the default
    type.__repr__, based on the module and qualified name, which is
    typically enough to uniquely identify a type.  For everything
    else, we fall back on repr(obj).
    """
    if isinstance(obj, types.GenericAlias):
        return repr(obj)
    if isinstance(obj, type):
        module, qualname = obj.__module__, obj.__qualname__
        # Builtins print bare ('int'), everything else fully qualified.
        return qualname if module == 'builtins' else f'{module}.{qualname}'
    if obj is ...:
        return '...'
    if isinstance(obj, types.FunctionType):
        return obj.__name__
    return repr(obj)
def _collect_type_vars(types_, typevar_types=None):
    """Collect all type variables contained in types_ in order of first
    appearance (lexicographic order). For example::

        _collect_type_vars((T, List[S, T])) == (T, S)
    """
    if typevar_types is None:
        typevar_types = TypeVar
    tvars = []
    for item in types_:
        # A bare type variable is collected directly ...
        if isinstance(item, typevar_types) and item not in tvars:
            tvars.append(item)
        # ... while parameterized aliases contribute their own parameters.
        if isinstance(item, (_GenericAlias, GenericAlias, types.UnionType)):
            tvars.extend(p for p in item.__parameters__ if p not in tvars)
    return tuple(tvars)
def _check_generic(cls, parameters, elen):
    """Check correct count for parameters of a generic cls (internal helper).

    This gives a nice error message in case of count mismatch.
    """
    if not elen:
        raise TypeError(f"{cls} is not a generic class")
    alen = len(parameters)
    if alen == elen:
        return
    many_or_few = 'many' if alen > elen else 'few'
    raise TypeError(f"Too {many_or_few} arguments for {cls};"
                    f" actual {alen}, expected {elen}")
def _prepare_paramspec_params(cls, params):
    """Prepares the parameters for a Generic containing ParamSpec
    variables (internal helper).
    """
    # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
    if (len(cls.__parameters__) == 1
            and params and not _is_param_expr(params[0])):
        assert isinstance(cls.__parameters__[0], ParamSpec)
        # The whole argument tuple substitutes the single ParamSpec.
        return (params,)
    else:
        _check_generic(cls, params, len(cls.__parameters__))
        _params = []
        # Convert lists to tuples to help other libraries cache the results.
        for p, tvar in zip(params, cls.__parameters__):
            if isinstance(tvar, ParamSpec) and isinstance(p, list):
                p = tuple(p)
            _params.append(p)
        return tuple(_params)
def _deduplicate(params):
    """Weed out strict duplicates, preserving the first of each occurrence."""
    remaining = set(params)
    if len(remaining) == len(params):
        # No duplicates: hand back the original object unchanged.
        return params
    deduped = []
    for item in params:
        if item in remaining:
            deduped.append(item)
            remaining.remove(item)
    assert not remaining, remaining
    return deduped
def _remove_dups_flatten(parameters):
    """An internal helper for Union creation and substitution: flatten Unions
    among parameters, then remove duplicates.
    """
    # Flatten out Union[Union[...], ...].
    flat = []
    for p in parameters:
        if isinstance(p, (_UnionGenericAlias, types.UnionType)):
            flat.extend(p.__args__)
        elif isinstance(p, tuple) and len(p) > 0 and p[0] is Union:
            # A (Union, arg1, arg2, ...) tuple: splice in its arguments.
            flat.extend(p[1:])
        else:
            flat.append(p)
    return tuple(_deduplicate(flat))
def _flatten_literal_params(parameters):
    """An internal helper for Literal creation: flatten Literals among parameters."""
    flat = []
    for p in parameters:
        # Nested Literal[...] contributes its values; anything else is kept as-is.
        values = p.__args__ if isinstance(p, _LiteralGenericAlias) else (p,)
        flat.extend(values)
    return tuple(flat)
# Cache-clear callbacks registered by _tp_cache; invoked to reset typing caches.
_cleanups = []


def _tp_cache(func=None, /, *, typed=False):
    """Internal wrapper caching __getitem__ of generic types with a fallback to
    original function for non-hashable arguments.
    """
    def _apply(fn):
        cached = functools.lru_cache(typed=typed)(fn)
        _cleanups.append(cached.cache_clear)

        @functools.wraps(fn)
        def wrapper(*args, **kwds):
            try:
                return cached(*args, **kwds)
            except TypeError:
                pass  # unhashable arguments; real errors are re-raised below
            return fn(*args, **kwds)
        return wrapper

    # Bare @_tp_cache vs parameterized @_tp_cache(typed=True).
    return _apply if func is None else _apply(func)
def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
    """Evaluate all forward references in the given type t.

    For use of globalns and localns see the docstring for get_type_hints().
    recursive_guard is used to prevent infinite recursion with recursive
    ForwardRef.
    """
    if isinstance(t, ForwardRef):
        return t._evaluate(globalns, localns, recursive_guard)
    if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
        # Recursively evaluate every argument of the alias.
        ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
        if ev_args == t.__args__:
            # Nothing changed: return the original alias unchanged.
            return t
        if isinstance(t, GenericAlias):
            return GenericAlias(t.__origin__, ev_args)
        if isinstance(t, types.UnionType):
            # Rebuild X | Y | ... from the evaluated arguments.
            return functools.reduce(operator.or_, ev_args)
        else:
            return t.copy_with(ev_args)
    return t
class _Final:
    """Mixin to prohibit subclassing."""

    __slots__ = ('__weakref__',)

    def __init_subclass__(cls, /, *args, **kwds):
        # Only subclasses created with the internal `_root=True` class keyword
        # are permitted; everything else is rejected.
        if '_root' not in kwds:
            raise TypeError("Cannot subclass special typing classes")
class _Immutable:
    """Mixin to indicate that object should not be copied."""

    __slots__ = ()

    def __copy__(self):
        # Copying is a no-op: hand the very same instance back.
        return self

    def __deepcopy__(self, memo):
        # Deep copies are likewise identity-preserving.
        return self
# Internal indicator of special typing constructs.
# See __doc__ instance attribute for specific docs.
class _SpecialForm(_Final, _root=True):
    __slots__ = ('_name', '__doc__', '_getitem')

    def __init__(self, getitem):
        # The decorated function both names the form and implements its
        # subscription behavior; its docstring becomes the form's __doc__.
        self._getitem = getitem
        self._name = getitem.__name__
        self.__doc__ = getitem.__doc__

    def __getattr__(self, item):
        if item in {'__name__', '__qualname__'}:
            return self._name
        raise AttributeError(item)

    def __mro_entries__(self, bases):
        # Forbid use as a base class.
        raise TypeError(f"Cannot subclass {self!r}")

    def __repr__(self):
        return 'typing.' + self._name

    def __reduce__(self):
        # Pickle by name; unpickling looks the form up in this module.
        return self._name

    def __call__(self, *args, **kwds):
        raise TypeError(f"Cannot instantiate {self!r}")

    def __or__(self, other):
        return Union[self, other]

    def __ror__(self, other):
        return Union[other, self]

    def __instancecheck__(self, obj):
        raise TypeError(f"{self} cannot be used with isinstance()")

    def __subclasscheck__(self, cls):
        raise TypeError(f"{self} cannot be used with issubclass()")

    @_tp_cache
    def __getitem__(self, parameters):
        # Cached: X[args] is computed once per distinct args.
        return self._getitem(self, parameters)
class _LiteralSpecialForm(_SpecialForm, _root=True):
    # Like _SpecialForm, but the subscription arguments are handed to the
    # implementation unpacked (Literal needs the raw values, not a tuple).
    def __getitem__(self, parameters):
        args = parameters if isinstance(parameters, tuple) else (parameters,)
        return self._getitem(self, *args)
@_SpecialForm
def Any(self, parameters):
    """Special type indicating an unconstrained type.

    - Any is compatible with every type.
    - Any assumed to have all methods.
    - All values assumed to be instances of Any.

    Note that all the above statements are true from the point of view of
    static type checkers. At runtime, Any should not be used with instance
    or class checks.
    """
    # Any takes no type parameters.
    raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def NoReturn(self, parameters):
    """Special type indicating functions that never return.
    Example::

        from typing import NoReturn

        def stop() -> NoReturn:
            raise Exception('no way')

    This type is invalid in other positions, e.g., ``List[NoReturn]``
    will fail in static type checkers.
    """
    # NoReturn cannot be parameterized.
    raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def ClassVar(self, parameters):
    """Special type construct to mark class variables.

    An annotation wrapped in ClassVar indicates that a given
    attribute is intended to be used as a class variable and
    should not be set on instances of that class. Usage::

        class Starship:
            stats: ClassVar[Dict[str, int]] = {} # class variable
            damage: int = 10                     # instance variable

    ClassVar accepts only types and cannot be further subscribed.

    Note that ClassVar is not a class itself, and should not
    be used with isinstance() or issubclass().
    """
    # Validate the single argument and wrap it in a generic alias.
    item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (item,))
@_SpecialForm
def Final(self, parameters):
    """Special typing construct to indicate final names to type checkers.

    A final name cannot be re-assigned or overridden in a subclass.
    For example:

        MAX_SIZE: Final = 9000
        MAX_SIZE += 1  # Error reported by type checker

        class Connection:
            TIMEOUT: Final[int] = 10

        class FastConnector(Connection):
            TIMEOUT = 1  # Error reported by type checker

    There is no runtime checking of these properties.
    """
    # Validate the single argument and wrap it in a generic alias.
    item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (item,))
@_SpecialForm
def Union(self, parameters):
    """Union type; Union[X, Y] means either X or Y.

    To define a union, use e.g. Union[int, str].  Details:
    - The arguments must be types and there must be at least one.
    - None as an argument is a special case and is replaced by
      type(None).
    - Unions of unions are flattened, e.g.::

        Union[Union[int, str], float] == Union[int, str, float]

    - Unions of a single argument vanish, e.g.::

        Union[int] == int  # The constructor actually returns int

    - Redundant arguments are skipped, e.g.::

        Union[int, str, int] == Union[int, str]

    - When comparing unions, the argument order is ignored, e.g.::

        Union[int, str] == Union[str, int]

    - You cannot subclass or instantiate a union.
    - You can use Optional[X] as a shorthand for Union[X, None].
    """
    if parameters == ():
        raise TypeError("Cannot take a Union of no types.")
    if not isinstance(parameters, tuple):
        parameters = (parameters,)
    msg = "Union[arg, ...]: each arg must be a type."
    parameters = tuple(_type_check(p, msg) for p in parameters)
    # Flatten nested unions and drop duplicates.
    parameters = _remove_dups_flatten(parameters)
    if len(parameters) == 1:
        # Union of one type collapses to that type.
        return parameters[0]
    if len(parameters) == 2 and type(None) in parameters:
        # X | None displays as Optional[X].
        return _UnionGenericAlias(self, parameters, name="Optional")
    return _UnionGenericAlias(self, parameters)
@_SpecialForm
def Optional(self, parameters):
    """Optional type.

    Optional[X] is equivalent to Union[X, None].
    """
    arg = _type_check(parameters, f"{self} requires a single type.")
    # Delegate to Union, which handles normalization and display.
    return Union[arg, type(None)]
@_LiteralSpecialForm
@_tp_cache(typed=True)
def Literal(self, *parameters):
    """Special typing form to define literal types (a.k.a. value types).

    This form can be used to indicate to type checkers that the corresponding
    variable or function parameter has a value equivalent to the provided
    literal (or one of several literals):

        def validate_simple(data: Any) -> Literal[True]:  # always returns True
            ...

        MODE = Literal['r', 'rb', 'w', 'wb']
        def open_helper(file: str, mode: MODE) -> str:
            ...

        open_helper('/some/path', 'r')      # Passes type check
        open_helper('/other/path', 'typo')  # Error in type checker

    Literal[...] cannot be subclassed. At runtime, an arbitrary value
    is allowed as type argument to Literal[...], but type checkers may
    impose restrictions.
    """
    # There is no '_type_check' call because arguments to Literal[...] are
    # values, not types.
    parameters = _flatten_literal_params(parameters)

    try:
        # De-duplicate by (value, type) so that e.g. False and 0 stay distinct.
        parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters))))
    except TypeError:  # unhashable parameters
        pass

    return _LiteralGenericAlias(self, parameters)
@_SpecialForm
def TypeAlias(self, parameters):
    """Special marker indicating that an assignment should
    be recognized as a proper type alias definition by type
    checkers.

    For example::

        Predicate: TypeAlias = Callable[..., bool]

    It's invalid when used anywhere except as in the example above.
    """
    # TypeAlias is a bare marker; it takes no parameters.
    raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def Concatenate(self, parameters):
    """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
    higher order function which adds, removes or transforms parameters of a
    callable.

    For example::

        Callable[Concatenate[int, P], int]

    See PEP 612 for detailed information.
    """
    if parameters == ():
        raise TypeError("Cannot take a Concatenate of no types.")
    if not isinstance(parameters, tuple):
        parameters = (parameters,)
    # PEP 612 requires the trailing argument to be a ParamSpec.
    if not isinstance(parameters[-1], ParamSpec):
        raise TypeError("The last parameter to Concatenate should be a "
                        "ParamSpec variable.")
    msg = "Concatenate[arg, ...]: each arg must be a type."
    parameters = tuple(_type_check(p, msg) for p in parameters)
    return _ConcatenateGenericAlias(self, parameters)
@_SpecialForm
def TypeGuard(self, parameters):
    """Special typing form used to annotate the return type of a user-defined
    type guard function.  ``TypeGuard`` only accepts a single type argument.
    At runtime, functions marked this way should return a boolean.

    ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
    type checkers to determine a more precise type of an expression within a
    program's code flow.  Usually type narrowing is done by analyzing
    conditional code flow and applying the narrowing to a block of code.  The
    conditional expression here is sometimes referred to as a "type guard".

    Sometimes it would be convenient to use a user-defined boolean function
    as a type guard.  Such a function should use ``TypeGuard[...]`` as its
    return type to alert static type checkers to this intention.

    Using  ``-> TypeGuard`` tells the static type checker that for a given
    function:

    1. The return value is a boolean.
    2. If the return value is ``True``, the type of its argument
       is the type inside ``TypeGuard``.

    For example::

        def is_str(val: Union[str, float]):
            # "isinstance" type guard
            if isinstance(val, str):
                # Type of ``val`` is narrowed to ``str``
                ...
            else:
                # Else, type of ``val`` is narrowed to ``float``.
                ...

    Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
    form of ``TypeA`` (it can even be a wider form) and this may lead to
    type-unsafe results.  The main reason is to allow for things like
    narrowing ``List[object]`` to ``List[str]`` even though the latter is not
    a subtype of the former, since ``List`` is invariant.  The responsibility of
    writing type-safe type guards is left to the user.

    ``TypeGuard`` also works with type variables.  For more information, see
    PEP 647 (User-Defined Type Guards).
    """
    # Validate the single argument and wrap it in a generic alias.
    item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (item,))
class ForwardRef(_Final, _root=True):
    """Internal wrapper to hold a forward reference."""

    __slots__ = ('__forward_arg__', '__forward_code__',
                 '__forward_evaluated__', '__forward_value__',
                 '__forward_is_argument__', '__forward_is_class__',
                 '__forward_module__')

    def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
        if not isinstance(arg, str):
            raise TypeError(f"Forward reference must be a string -- got {arg!r}")
        try:
            # Compile eagerly so syntactically invalid references fail at
            # construction time rather than at evaluation time.
            code = compile(arg, '<string>', 'eval')
        except SyntaxError:
            raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}")
        self.__forward_arg__ = arg
        self.__forward_code__ = code
        self.__forward_evaluated__ = False
        self.__forward_value__ = None
        self.__forward_is_argument__ = is_argument
        self.__forward_is_class__ = is_class
        self.__forward_module__ = module

    def _evaluate(self, globalns, localns, recursive_guard):
        if self.__forward_arg__ in recursive_guard:
            # Already being evaluated higher up the stack: break the cycle.
            return self
        if not self.__forward_evaluated__ or localns is not globalns:
            if globalns is None and localns is None:
                globalns = localns = {}
            elif globalns is None:
                globalns = localns
            elif localns is None:
                localns = globalns
            if self.__forward_module__ is not None:
                # Resolve names in the module the reference originated from.
                globalns = getattr(
                    sys.modules.get(self.__forward_module__, None), '__dict__', globalns
                )
            type_ = _type_check(
                eval(self.__forward_code__, globalns, localns),
                "Forward references must evaluate to types.",
                is_argument=self.__forward_is_argument__,
                is_class=self.__forward_is_class__,
            )
            # Recursively evaluate nested forward references, guarding
            # against self-reference with recursive_guard.
            self.__forward_value__ = _eval_type(
                type_, globalns, localns, recursive_guard | {self.__forward_arg__}
            )
            self.__forward_evaluated__ = True
        return self.__forward_value__

    def __eq__(self, other):
        if not isinstance(other, ForwardRef):
            return NotImplemented
        if self.__forward_evaluated__ and other.__forward_evaluated__:
            return (self.__forward_arg__ == other.__forward_arg__ and
                    self.__forward_value__ == other.__forward_value__)
        return self.__forward_arg__ == other.__forward_arg__

    def __hash__(self):
        return hash(self.__forward_arg__)

    def __or__(self, other):
        return Union[self, other]

    def __ror__(self, other):
        return Union[other, self]

    def __repr__(self):
        return f'ForwardRef({self.__forward_arg__!r})'
class _TypeVarLike:
    """Mixin shared by TypeVar and ParamSpec.

    Provides the common bound/variance setup plus the ``|`` union
    operators, repr and pickling behavior.
    """
    def __init__(self, bound, covariant, contravariant):
        """Set up the __bound__, __covariant__ and __contravariant__
        attributes common to TypeVar and ParamSpec.
        """
        if covariant and contravariant:
            raise ValueError("Bivariant types are not supported.")
        self.__covariant__ = bool(covariant)
        self.__contravariant__ = bool(contravariant)
        # A falsy bound (None) is stored as None; otherwise it must pass
        # the usual "is a type" validation.
        self.__bound__ = _type_check(bound, "Bound must be a type.") if bound else None
    def __or__(self, right):
        return Union[self, right]
    def __ror__(self, left):
        return Union[left, self]
    def __repr__(self):
        # Variance is conventionally rendered as a +/-/~ prefix.
        if self.__covariant__:
            variance = '+'
        elif self.__contravariant__:
            variance = '-'
        else:
            variance = '~'
        return variance + self.__name__
    def __reduce__(self):
        # Pickle by name; only globally-defined instances round-trip.
        return self.__name__
class TypeVar( _Final, _Immutable, _TypeVarLike, _root=True):
    """Type variable.

    Usage::

      T = TypeVar('T')  # Can be anything
      A = TypeVar('A', str, bytes)  # Must be str or bytes

    Type variables exist primarily for the benefit of static type
    checkers.  They serve as the parameters for generic types as well
    as for generic function definitions.  See class Generic for more
    information on generic types.  Generic functions work as follows:

      def repeat(x: T, n: int) -> List[T]:
          '''Return a list containing n references to x.'''
          return [x]*n

      def longest(x: A, y: A) -> A:
          '''Return the longest of two strings.'''
          return x if len(x) >= len(y) else y

    The latter example's signature is essentially the overloading
    of (str, str) -> str and (bytes, bytes) -> bytes.  Also note
    that if the arguments are instances of some subclass of str,
    the return type is still plain str.

    At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError.

    Type variables defined with covariant=True or contravariant=True
    can be used to declare covariant or contravariant generic types.
    See PEP 484 for more details. By default generic types are invariant
    in all type variables.

    Type variables can be introspected. e.g.:

      T.__name__ == 'T'
      T.__constraints__ == ()
      T.__covariant__ == False
      T.__contravariant__ == False
      A.__constraints__ == (str, bytes)

    Note that only type variables defined in global scope can be pickled.
    """
    __slots__ = ('__name__', '__bound__', '__constraints__',
                 '__covariant__', '__contravariant__', '__dict__')
    def __init__(self, name, *constraints, bound=None,
                 covariant=False, contravariant=False):
        self.__name__ = name
        super().__init__(bound, covariant, contravariant)
        # A TypeVar may be constrained *or* bounded, never both, and a
        # single constraint makes no sense (use bound= for that).
        if constraints and bound is not None:
            raise TypeError("Constraints cannot be combined with bound=...")
        if constraints and len(constraints) == 1:
            raise TypeError("A single constraint is not allowed")
        msg = "TypeVar(name, constraint, ...): constraints must be types."
        self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
        # Record the defining module so the TypeVar can be pickled.
        def_mod = _caller()
        if def_mod != 'typing':
            self.__module__ = def_mod
class ParamSpecArgs(_Final, _Immutable, _root=True):
    """The args for a ParamSpec object.

    Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.

    ParamSpecArgs objects have a reference back to their ParamSpec:

        P.args.__origin__ is P

    This type is meant for runtime introspection and has no special meaning
    to static type checkers.
    """
    def __init__(self, origin):
        # Keep a back-reference to the owning ParamSpec.
        self.__origin__ = origin
    def __repr__(self):
        return "{}.args".format(self.__origin__.__name__)
class ParamSpecKwargs(_Final, _Immutable, _root=True):
    """The kwargs for a ParamSpec object.

    Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.

    ParamSpecKwargs objects have a reference back to their ParamSpec:

        P.kwargs.__origin__ is P

    This type is meant for runtime introspection and has no special meaning
    to static type checkers.
    """
    def __init__(self, origin):
        # Keep a back-reference to the owning ParamSpec.
        self.__origin__ = origin
    def __repr__(self):
        return "{}.kwargs".format(self.__origin__.__name__)
class ParamSpec(_Final, _Immutable, _TypeVarLike, _root=True):
    """Parameter specification variable.

    Usage::

       P = ParamSpec('P')

    Parameter specification variables exist primarily for the benefit of
    static type checkers.  They are used to forward the parameter types of
    one callable to another callable, a pattern commonly found in higher
    order functions and decorators.  They are only valid when used in
    ``Concatenate``, or as the first argument to ``Callable``, or as
    parameters for user-defined Generics.  See class Generic for more
    information on generic types.  An example for annotating a decorator::

       T = TypeVar('T')
       P = ParamSpec('P')

       def add_logging(f: Callable[P, T]) -> Callable[P, T]:
           '''A type-safe decorator to add logging to a function.'''
           def inner(*args: P.args, **kwargs: P.kwargs) -> T:
               logging.info(f'{f.__name__} was called')
               return f(*args, **kwargs)
           return inner

       @add_logging
       def add_two(x: float, y: float) -> float:
           '''Add two numbers together.'''
           return x + y

    Parameter specification variables defined with covariant=True or
    contravariant=True can be used to declare covariant or contravariant
    generic types.  These keyword arguments are valid, but their actual
    semantics are yet to be decided.  See PEP 612 for details.

    Parameter specification variables can be introspected. e.g.:

       P.__name__ == 'P'
       P.__bound__ == None
       P.__covariant__ == False
       P.__contravariant__ == False

    Note that only parameter specification variables defined in global
    scope can be pickled.
    """
    __slots__ = ('__name__', '__bound__', '__covariant__', '__contravariant__',
                 '__dict__')
    @property
    def args(self):
        # See ParamSpecArgs: P.args with a back-reference to P.
        return ParamSpecArgs(self)
    @property
    def kwargs(self):
        # See ParamSpecKwargs: P.kwargs with a back-reference to P.
        return ParamSpecKwargs(self)
    def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
        self.__name__ = name
        super().__init__(bound, covariant, contravariant)
        # Record the defining module so the ParamSpec can be pickled.
        def_mod = _caller()
        if def_mod != 'typing':
            self.__module__ = def_mod
def _is_dunder(attr):
return attr.startswith('__') and attr.endswith('__')
class _BaseGenericAlias(_Final, _root=True):
    """The central part of internal API.

    This represents a generic version of type 'origin' with type arguments
    'params'.  There are two kinds of these aliases: user defined and
    special.  The special ones are wrappers around builtin collections and
    ABCs in collections.abc.  These must have 'name' always set.  If 'inst'
    is False, then the alias can't be instantiated, this is used by e.g.
    typing.List and typing.Dict.
    """
    def __init__(self, origin, *, inst=True, name=None):
        self._inst = inst
        self._name = name
        self.__origin__ = origin
        self.__slots__ = None # This is not documented.
    def __call__(self, *args, **kwargs):
        # Calling the alias instantiates the origin type, remembering the
        # alias on the result's __orig_class__ when possible.
        if not self._inst:
            raise TypeError(f"Type {self._name} cannot be instantiated; "
                            f"use {self.__origin__.__name__}() instead")
        result = self.__origin__(*args, **kwargs)
        try:
            result.__orig_class__ = self
        except AttributeError:
            # Some instances (e.g. with __slots__) reject new attributes;
            # silently skip the bookkeeping in that case.
            pass
        return result
    def __mro_entries__(self, bases):
        # When the alias is used as a base class, substitute the real
        # origin class and, if no later base is generic, Generic itself.
        res = []
        if self.__origin__ not in bases:
            res.append(self.__origin__)
        i = bases.index(self)
        for b in bases[i+1:]:
            if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
                break
        else:
            res.append(Generic)
        return tuple(res)
    def __getattr__(self, attr):
        if attr in {'__name__', '__qualname__'}:
            return self._name or self.__origin__.__name__
        # We are careful for copy and pickle.
        # Also for simplicity we just don't relay all dunder names
        if '__origin__' in self.__dict__ and not _is_dunder(attr):
            return getattr(self.__origin__, attr)
        raise AttributeError(attr)
    def __setattr__(self, attr, val):
        # Internal bookkeeping attributes live on the alias itself;
        # everything else is forwarded to the origin class.
        if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
                                        '_typevar_types', '_paramspec_tvars'}:
            super().__setattr__(attr, val)
        else:
            setattr(self.__origin__, attr, val)
    def __instancecheck__(self, obj):
        return self.__subclasscheck__(type(obj))
    def __subclasscheck__(self, cls):
        raise TypeError("Subscripted generics cannot be used with"
                        " class and instance checks")
# Special typing constructs Union, Optional, Generic, Callable and Tuple
# use three special attributes for internal bookkeeping of generic types:
# * __parameters__ is a tuple of unique free type parameters of a generic
# type, for example, Dict[T, T].__parameters__ == (T,);
# * __origin__ keeps a reference to a type that was subscripted,
# e.g., Union[T, int].__origin__ == Union, or the non-generic version of
# the type.
# * __args__ is a tuple of all arguments used in subscripting,
# e.g., Dict[T, int].__args__ == (T, int).
class _GenericAlias(_BaseGenericAlias, _root=True):
    # The result of subscripting a generic, e.g. List[int] or MyGeneric[T].
    # Supports re-subscription, which substitutes the alias's free type
    # variables / ParamSpecs in __args__.
    def __init__(self, origin, params, *, inst=True, name=None,
                 _typevar_types=TypeVar,
                 _paramspec_tvars=False):
        super().__init__(origin, inst=inst, name=name)
        if not isinstance(params, tuple):
            params = (params,)
        # Map the internal sentinels back to the values they stand for:
        # _TypingEllipsis -> ..., _TypingEmpty -> ().
        self.__args__ = tuple(... if a is _TypingEllipsis else
                              () if a is _TypingEmpty else
                              a for a in params)
        self.__parameters__ = _collect_type_vars(params, typevar_types=_typevar_types)
        self._typevar_types = _typevar_types
        self._paramspec_tvars = _paramspec_tvars
        if not name:
            self.__module__ = origin.__module__
    def __eq__(self, other):
        # Aliases compare by origin and (ordered) arguments.
        if not isinstance(other, _GenericAlias):
            return NotImplemented
        return (self.__origin__ == other.__origin__
                and self.__args__ == other.__args__)
    def __hash__(self):
        return hash((self.__origin__, self.__args__))
    def __or__(self, right):
        return Union[self, right]
    def __ror__(self, left):
        return Union[left, self]
    @_tp_cache
    def __getitem__(self, params):
        # Re-subscription: substitute this alias's free parameters, e.g.
        # Dict[T, int][str] -> Dict[str, int].
        if self.__origin__ in (Generic, Protocol):
            # Can't subscript Generic[...] or Protocol[...].
            raise TypeError(f"Cannot subscript already-subscripted {self}")
        if not isinstance(params, tuple):
            params = (params,)
        params = tuple(_type_convert(p) for p in params)
        if (self._paramspec_tvars
                and any(isinstance(t, ParamSpec) for t in self.__parameters__)):
            params = _prepare_paramspec_params(self, params)
        else:
            _check_generic(self, params, len(self.__parameters__))
        # Build the substitution map and apply it to every argument.
        subst = dict(zip(self.__parameters__, params))
        new_args = []
        for arg in self.__args__:
            if isinstance(arg, self._typevar_types):
                if isinstance(arg, ParamSpec):
                    arg = subst[arg]
                    # A ParamSpec may only be replaced by a parameter
                    # expression (list of types, ..., ParamSpec, Concatenate).
                    if not _is_param_expr(arg):
                        raise TypeError(f"Expected a list of types, an ellipsis, "
                                        f"ParamSpec, or Concatenate. Got {arg}")
                else:
                    arg = subst[arg]
            elif isinstance(arg, (_GenericAlias, GenericAlias, types.UnionType)):
                # Recurse into nested generic arguments that still have
                # free parameters of their own.
                subparams = arg.__parameters__
                if subparams:
                    subargs = tuple(subst[x] for x in subparams)
                    arg = arg[subargs]
            # Required to flatten out the args for CallableGenericAlias
            if self.__origin__ == collections.abc.Callable and isinstance(arg, tuple):
                new_args.extend(arg)
            else:
                new_args.append(arg)
        return self.copy_with(tuple(new_args))
    def copy_with(self, params):
        return self.__class__(self.__origin__, params, name=self._name, inst=self._inst)
    def __repr__(self):
        if self._name:
            name = 'typing.' + self._name
        else:
            name = _type_repr(self.__origin__)
        args = ", ".join([_type_repr(a) for a in self.__args__])
        return f'{name}[{args}]'
    def __reduce__(self):
        # Pickle via re-subscription: operator.getitem(origin, args).
        if self._name:
            origin = globals()[self._name]
        else:
            origin = self.__origin__
        args = tuple(self.__args__)
        if len(args) == 1 and not isinstance(args[0], tuple):
            args, = args
        return operator.getitem, (origin, args)
    def __mro_entries__(self, bases):
        if isinstance(self.__origin__, _SpecialForm):
            raise TypeError(f"Cannot subclass {self!r}")
        if self._name:  # generic version of an ABC or built-in class
            return super().__mro_entries__(bases)
        if self.__origin__ is Generic:
            if Protocol in bases:
                return ()
            # Omit Generic itself when another generic base will already
            # bring it into the MRO.
            i = bases.index(self)
            for b in bases[i+1:]:
                if isinstance(b, _BaseGenericAlias) and b is not self:
                    return ()
        return (self.__origin__,)
# _nparams is the number of accepted parameters, e.g. 0 for Hashable,
# 1 for List and 2 for Dict. It may be -1 if variable number of
# parameters are accepted (needs custom __getitem__).
class _SpecialGenericAlias(_BaseGenericAlias, _root=True):
    """Alias for an unsubscripted special generic (e.g. typing.List);
    subscripting it produces a plain _GenericAlias via copy_with().
    """
    def __init__(self, origin, nparams, *, inst=True, name=None):
        if name is None:
            name = origin.__name__
        super().__init__(origin, inst=inst, name=name)
        # Accepted number of parameters; -1 means variable arity
        # (such aliases provide a custom __getitem__).
        self._nparams = nparams
        # Point users of help() at the wrapped builtin/ABC.
        if origin.__module__ == 'builtins':
            self.__doc__ = f'A generic version of {origin.__qualname__}.'
        else:
            self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
    @_tp_cache
    def __getitem__(self, params):
        if not isinstance(params, tuple):
            params = (params,)
        msg = "Parameters to generic types must be types."
        params = tuple(_type_check(p, msg) for p in params)
        _check_generic(self, params, self._nparams)
        return self.copy_with(params)
    def copy_with(self, params):
        return _GenericAlias(self.__origin__, params,
                             name=self._name, inst=self._inst)
    def __repr__(self):
        return 'typing.' + self._name
    def __subclasscheck__(self, cls):
        # Unlike subscripted aliases, the bare special forms do support
        # issubclass() against their origin type.
        if isinstance(cls, _SpecialGenericAlias):
            return issubclass(cls.__origin__, self.__origin__)
        if not isinstance(cls, _GenericAlias):
            return issubclass(cls, self.__origin__)
        return super().__subclasscheck__(cls)
    def __reduce__(self):
        return self._name
    def __or__(self, right):
        return Union[self, right]
    def __ror__(self, left):
        return Union[left, self]
class _CallableGenericAlias(_GenericAlias, _root=True):
    # Subscripted Callable.  __args__ is the flattened (*arg_types, result),
    # except for the Callable[P, R] / Callable[..., R] forms where it is
    # (param_expr, result).
    def __repr__(self):
        assert self._name == 'Callable'
        args = self.__args__
        if len(args) == 2 and _is_param_expr(args[0]):
            # Callable[P, R] / Callable[..., R] render via the default repr.
            return super().__repr__()
        return (f'typing.Callable'
                f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], '
                f'{_type_repr(args[-1])}]')
    def __reduce__(self):
        args = self.__args__
        if not (len(args) == 2 and _is_param_expr(args[0])):
            # Re-nest the flattened args into the ([arg, ...], result)
            # shape that Callable[...] subscription expects.
            args = list(args[:-1]), args[-1]
        return operator.getitem, (Callable, args)
class _CallableType(_SpecialGenericAlias, _root=True):
    # The bare typing.Callable; implements the two-part
    # Callable[[args], result] subscription syntax.
    def copy_with(self, params):
        return _CallableGenericAlias(self.__origin__, params,
                                     name=self._name, inst=self._inst,
                                     _typevar_types=(TypeVar, ParamSpec),
                                     _paramspec_tvars=True)
    def __getitem__(self, params):
        if not isinstance(params, tuple) or len(params) != 2:
            raise TypeError("Callable must be used as "
                            "Callable[[arg, ...], result].")
        args, result = params
        # This relaxes what args can be on purpose to allow things like
        # PEP 612 ParamSpec. Responsibility for whether a user is using
        # Callable[...] properly is deferred to static type checkers.
        if isinstance(args, list):
            params = (tuple(args), result)
        else:
            params = (args, result)
        return self.__getitem_inner__(params)
    @_tp_cache
    def __getitem_inner__(self, params):
        # Separated out so _tp_cache sees a hashable, normalized key
        # (lists were converted to tuples in __getitem__).
        args, result = params
        msg = "Callable[args, result]: result must be a type."
        result = _type_check(result, msg)
        if args is Ellipsis:
            # Callable[..., R]: any argument list is acceptable.
            return self.copy_with((_TypingEllipsis, result))
        if not isinstance(args, tuple):
            args = (args,)
        args = tuple(_type_convert(arg) for arg in args)
        # Store flattened: (*arg_types, result).
        params = args + (result,)
        return self.copy_with(params)
class _TupleType(_SpecialGenericAlias, _root=True):
    # The bare typing.Tuple; handles the Tuple[()], Tuple[t, ...] and
    # Tuple[t0, t1, ...] subscription forms.
    @_tp_cache
    def __getitem__(self, params):
        if params == ():
            # Tuple[()] -- the empty tuple type, stored via a sentinel.
            return self.copy_with((_TypingEmpty,))
        if not isinstance(params, tuple):
            params = (params,)
        if len(params) == 2 and params[1] is ...:
            # Tuple[t, ...] -- homogeneous, arbitrary-length tuple.
            msg = "Tuple[t, ...]: t must be a type."
            p = _type_check(params[0], msg)
            return self.copy_with((p, _TypingEllipsis))
        msg = "Tuple[t0, t1, ...]: each t must be a type."
        params = tuple(_type_check(p, msg) for p in params)
        return self.copy_with(params)
class _UnionGenericAlias(_GenericAlias, _root=True):
    """Runtime representation of a subscripted Union[...]."""
    def copy_with(self, params):
        # Re-subscribe through Union so deduplication/flattening reapply.
        return Union[params]
    def __eq__(self, other):
        if not isinstance(other, (_UnionGenericAlias, types.UnionType)):
            return NotImplemented
        # Unions compare as unordered sets of member types.
        return set(self.__args__) == set(other.__args__)
    def __hash__(self):
        return hash(frozenset(self.__args__))
    def __repr__(self):
        args = self.__args__
        if len(args) == 2:
            # Render X | None as Optional[X] for readability.
            if args[0] is type(None):
                return f'typing.Optional[{_type_repr(args[1])}]'
            elif args[1] is type(None):
                return f'typing.Optional[{_type_repr(args[0])}]'
        return super().__repr__()
    def __instancecheck__(self, obj):
        return self.__subclasscheck__(type(obj))
    def __subclasscheck__(self, cls):
        # A class is a subclass of a union iff it is a subclass of any member.
        for arg in self.__args__:
            if issubclass(cls, arg):
                return True
        # Bug fix: previously the loop fell through and implicitly returned
        # None; return an explicit bool (equivalent truthiness, but the
        # documented contract of __subclasscheck__ is a boolean).
        return False
    def __reduce__(self):
        func, (origin, args) = super().__reduce__()
        return func, (Union, args)
def _value_and_type_iter(parameters):
return ((p, type(p)) for p in parameters)
class _LiteralGenericAlias(_GenericAlias, _root=True):
    # Literal arguments compare by value *and* type (via
    # _value_and_type_iter) so that e.g. Literal[0] != Literal[False].
    def __eq__(self, other):
        if not isinstance(other, _LiteralGenericAlias):
            return NotImplemented
        return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__))
    def __hash__(self):
        return hash(frozenset(_value_and_type_iter(self.__args__)))
class _ConcatenateGenericAlias(_GenericAlias, _root=True):
    # Concatenate[...] aliases always participate in ParamSpec
    # substitution, so force the ParamSpec-aware settings.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs,
                         _typevar_types=(TypeVar, ParamSpec),
                         _paramspec_tvars=True)
class Generic:
    """Abstract base class for generic types.

    A generic type is typically declared by inheriting from
    this class parameterized with one or more type variables.
    For example, a generic mapping type might be defined as::

      class Mapping(Generic[KT, VT]):
          def __getitem__(self, key: KT) -> VT:
              ...
          # Etc.

    This class can then be used as follows::

      def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
          try:
              return mapping[key]
          except KeyError:
              return default
    """
    __slots__ = ()
    _is_protocol = False
    @_tp_cache
    def __class_getitem__(cls, params):
        # Handles both Generic[T]/Protocol[T] and subscription of any
        # user-defined Generic subclass.
        if not isinstance(params, tuple):
            params = (params,)
        if not params and cls is not Tuple:
            raise TypeError(
                f"Parameter list to {cls.__qualname__}[...] cannot be empty")
        params = tuple(_type_convert(p) for p in params)
        if cls in (Generic, Protocol):
            # Generic and Protocol can only be subscripted with unique type variables.
            if not all(isinstance(p, (TypeVar, ParamSpec)) for p in params):
                raise TypeError(
                    f"Parameters to {cls.__name__}[...] must all be type variables "
                    f"or parameter specification variables.")
            if len(set(params)) != len(params):
                raise TypeError(
                    f"Parameters to {cls.__name__}[...] must all be unique")
        else:
            # Subscripting a regular Generic subclass.
            if any(isinstance(t, ParamSpec) for t in cls.__parameters__):
                params = _prepare_paramspec_params(cls, params)
            else:
                _check_generic(cls, params, len(cls.__parameters__))
        return _GenericAlias(cls, params,
                             _typevar_types=(TypeVar, ParamSpec),
                             _paramspec_tvars=True)
    def __init_subclass__(cls, *args, **kwargs):
        super().__init_subclass__(*args, **kwargs)
        # Compute cls.__parameters__ from the (original, pre-__mro_entries__)
        # bases, and reject inheriting from plain, unsubscripted Generic.
        tvars = []
        if '__orig_bases__' in cls.__dict__:
            error = Generic in cls.__orig_bases__
        else:
            error = Generic in cls.__bases__ and cls.__name__ != 'Protocol'
        if error:
            raise TypeError("Cannot inherit from plain Generic")
        if '__orig_bases__' in cls.__dict__:
            tvars = _collect_type_vars(cls.__orig_bases__, (TypeVar, ParamSpec))
            # Look for Generic[T1, ..., Tn].
            # If found, tvars must be a subset of it.
            # If not found, tvars is it.
            # Also check for and reject plain Generic,
            # and reject multiple Generic[...].
            gvars = None
            for base in cls.__orig_bases__:
                if (isinstance(base, _GenericAlias) and
                        base.__origin__ is Generic):
                    if gvars is not None:
                        raise TypeError(
                            "Cannot inherit from Generic[...] multiple types.")
                    gvars = base.__parameters__
            if gvars is not None:
                tvarset = set(tvars)
                gvarset = set(gvars)
                if not tvarset <= gvarset:
                    s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
                    s_args = ', '.join(str(g) for g in gvars)
                    raise TypeError(f"Some type variables ({s_vars}) are"
                                    f" not listed in Generic[{s_args}]")
                tvars = gvars
        cls.__parameters__ = tuple(tvars)
class _TypingEmpty:
    """Internal placeholder for () or [].  Used by _TupleType (Tuple[()])
    and _GenericAlias to allow an empty list/tuple in specific places,
    without allowing them to sneak in where prohibited.
    """
class _TypingEllipsis:
    """Internal placeholder for ... (ellipsis); used when storing
    Callable[..., R] / Tuple[t, ...] arguments."""
# Attributes managed by the typing machinery itself; never protocol members.
_TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
                     '_is_protocol', '_is_runtime_protocol']
# Dunder/special names present on most classes regardless of the protocol.
_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
                  '__init__', '__module__', '__new__', '__slots__',
                  '__subclasshook__', '__weakref__', '__class_getitem__']
# These special attributes will be not collected as protocol members.
EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
def _get_protocol_attrs(cls):
    """Collect protocol members from a protocol class object.

    This includes names actually defined in the class dictionary, as well
    as names that appear in annotations.  Special names (see
    EXCLUDED_ATTRIBUTES) are skipped.
    """
    attrs = set()
    # Walk the MRO, skipping object itself; Protocol and Generic contribute
    # no protocol members of their own.
    for base in cls.__mro__[:-1]:
        if base.__name__ in ('Protocol', 'Generic'):
            continue
        candidates = list(base.__dict__) + list(getattr(base, '__annotations__', {}))
        attrs.update(
            attr for attr in candidates
            if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES
        )
    return attrs
def _is_callable_members_only(cls):
    # PEP 544 prohibits using issubclass() with protocols that have
    # non-method members; report whether every member is callable.
    for attr in _get_protocol_attrs(cls):
        if not callable(getattr(cls, attr, None)):
            return False
    return True
def _no_init_or_replace_init(self, *args, **kwargs):
    """Placeholder __init__ installed on Protocol subclasses.

    Forbids instantiating protocol classes; on the first instantiation of
    a concrete subclass, replaces itself with the real __init__ found in
    the MRO and delegates to it.
    """
    cls = type(self)
    if cls._is_protocol:
        raise TypeError('Protocols cannot be instantiated')
    # Already using a custom `__init__`. No need to calculate correct
    # `__init__` to call. This can lead to RecursionError. See bpo-45121.
    if cls.__init__ is not _no_init_or_replace_init:
        return
    # Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.
    # The first instantiation of the subclass will call `_no_init_or_replace_init` which
    # searches for a proper new `__init__` in the MRO. The new `__init__`
    # replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent
    # instantiation of the protocol subclass will thus use the new
    # `__init__` and no longer call `_no_init_or_replace_init`.
    for base in cls.__mro__:
        init = base.__dict__.get('__init__', _no_init_or_replace_init)
        if init is not _no_init_or_replace_init:
            cls.__init__ = init
            break
    else:
        # should not happen
        cls.__init__ = object.__init__
    # Delegate to the __init__ that was just installed.
    cls.__init__(self, *args, **kwargs)
def _caller(depth=1, default='__main__'):
try:
return sys._getframe(depth + 1).f_globals.get('__name__', default)
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _allow_reckless_class_checks(depth=3):
    """Allow instance and class checks for special stdlib modules.

    The abc and functools modules indiscriminately call isinstance() and
    issubclass() on the whole MRO of a user class, which may contain
    protocols; those callers (and unknown platforms, where _caller()
    returns None) get a pass.
    """
    module = _caller(depth)
    return module is None or module in ('abc', 'functools')
# Non-protocol stdlib classes that protocol classes are nevertheless
# allowed to inherit from (consulted by Protocol.__init_subclass__).
_PROTO_ALLOWLIST = {
    'collections.abc': [
        'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
        'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
    ],
    'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
}
class _ProtocolMeta(ABCMeta):
    # This metaclass is really unfortunate and exists only because of
    # the lack of __instancehook__.
    def __instancecheck__(cls, instance):
        # We need this method for situations where attributes are
        # assigned in __init__.
        if (
            getattr(cls, '_is_protocol', False) and
            not getattr(cls, '_is_runtime_protocol', False) and
            not _allow_reckless_class_checks(depth=2)
        ):
            raise TypeError("Instance and class checks can only be used with"
                            " @runtime_checkable protocols")
        # Non-protocols, and protocols whose members are all callable,
        # first try the ordinary nominal subclass check.
        if ((not getattr(cls, '_is_protocol', False) or
                _is_callable_members_only(cls)) and
                issubclass(instance.__class__, cls)):
            return True
        if cls._is_protocol:
            # Structural check: every protocol member must be present on
            # the instance.
            if all(hasattr(instance, attr) and
                    # All *methods* can be blocked by setting them to None.
                    (not callable(getattr(cls, attr, None)) or
                     getattr(instance, attr) is not None)
                    for attr in _get_protocol_attrs(cls)):
                return True
        return super().__instancecheck__(instance)
class Protocol(Generic, metaclass=_ProtocolMeta):
    """Base class for protocol classes.

    Protocol classes are defined as::

        class Proto(Protocol):
            def meth(self) -> int:
                ...

    Such classes are primarily used with static type checkers that recognize
    structural subtyping (static duck-typing), for example::

        class C:
            def meth(self) -> int:
                return 0

        def func(x: Proto) -> int:
            return x.meth()

        func(C())  # Passes static type check

    See PEP 544 for details. Protocol classes decorated with
    @typing.runtime_checkable act as simple-minded runtime protocols that
    check only the presence of given attributes, ignoring their type
    signatures.  Protocol classes can be generic, they are defined as::

        class GenProto(Protocol[T]):
            def meth(self) -> T:
                ...
    """
    __slots__ = ()
    _is_protocol = True
    _is_runtime_protocol = False
    def __init_subclass__(cls, *args, **kwargs):
        super().__init_subclass__(*args, **kwargs)
        # Determine if this is a protocol or a concrete subclass.
        if not cls.__dict__.get('_is_protocol', False):
            cls._is_protocol = any(b is Protocol for b in cls.__bases__)
        # Set (or override) the protocol subclass hook.
        def _proto_hook(other):
            # Implements issubclass() for @runtime_checkable protocols.
            if not cls.__dict__.get('_is_protocol', False):
                return NotImplemented
            # First, perform various sanity checks.
            if not getattr(cls, '_is_runtime_protocol', False):
                if _allow_reckless_class_checks():
                    return NotImplemented
                raise TypeError("Instance and class checks can only be used with"
                                " @runtime_checkable protocols")
            if not _is_callable_members_only(cls):
                if _allow_reckless_class_checks():
                    return NotImplemented
                raise TypeError("Protocols with non-method members"
                                " don't support issubclass()")
            if not isinstance(other, type):
                # Same error message as for issubclass(1, int).
                raise TypeError('issubclass() arg 1 must be a class')
            # Second, perform the actual structural compatibility check.
            for attr in _get_protocol_attrs(cls):
                for base in other.__mro__:
                    # Check if the members appears in the class dictionary...
                    if attr in base.__dict__:
                        # A member explicitly set to None blocks the match.
                        if base.__dict__[attr] is None:
                            return NotImplemented
                        break
                    # ...or in annotations, if it is a sub-protocol.
                    annotations = getattr(base, '__annotations__', {})
                    if (isinstance(annotations, collections.abc.Mapping) and
                            attr in annotations and
                            issubclass(other, Generic) and other._is_protocol):
                        break
                else:
                    # Member not found anywhere in other's MRO.
                    return NotImplemented
            return True
        if '__subclasshook__' not in cls.__dict__:
            cls.__subclasshook__ = _proto_hook
        # We have nothing more to do for non-protocols...
        if not cls._is_protocol:
            return
        # ... otherwise check consistency of bases, and prohibit instantiation.
        for base in cls.__bases__:
            if not (base in (object, Generic) or
                    base.__module__ in _PROTO_ALLOWLIST and
                    base.__name__ in _PROTO_ALLOWLIST[base.__module__] or
                    issubclass(base, Generic) and base._is_protocol):
                raise TypeError('Protocols can only inherit from other'
                                ' protocols, got %r' % base)
        cls.__init__ = _no_init_or_replace_init
class _AnnotatedAlias(_GenericAlias, _root=True):
    """Runtime representation of an annotated type.

    At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
    with extra annotations. The alias behaves like a normal typing alias,
    instantiating is the same as instantiating the underlying type, binding
    it to types is also the same.
    """
    def __init__(self, origin, metadata):
        if isinstance(origin, _AnnotatedAlias):
            # Flatten nested Annotated: inner metadata comes first.
            metadata = origin.__metadata__ + metadata
            origin = origin.__origin__
        # Pass origin twice: once as the alias origin and once as its
        # single type argument, so __args__ == (origin,).
        super().__init__(origin, origin)
        self.__metadata__ = metadata
    def copy_with(self, params):
        # Substitution yields exactly one new underlying type; the
        # metadata is carried over unchanged.
        assert len(params) == 1
        new_type = params[0]
        return _AnnotatedAlias(new_type, self.__metadata__)
    def __repr__(self):
        return "typing.Annotated[{}, {}]".format(
            _type_repr(self.__origin__),
            ", ".join(repr(a) for a in self.__metadata__)
        )
    def __reduce__(self):
        return operator.getitem, (
            Annotated, (self.__origin__,) + self.__metadata__
        )
    def __eq__(self, other):
        if not isinstance(other, _AnnotatedAlias):
            return NotImplemented
        return (self.__origin__ == other.__origin__
                and self.__metadata__ == other.__metadata__)
    def __hash__(self):
        return hash((self.__origin__, self.__metadata__))
    def __getattr__(self, attr):
        if attr in {'__name__', '__qualname__'}:
            return 'Annotated'
        return super().__getattr__(attr)
class Annotated:
    """Add context specific metadata to a type.

    Example: Annotated[int, runtime_check.Unsigned] indicates to the
    hypothetical runtime_check module that this type is an unsigned int.
    Every other consumer of this type can ignore this metadata and treat
    this type as int.

    The first argument to Annotated must be a valid type.

    Details:

    - It's an error to call `Annotated` with less than two arguments.
    - Nested Annotated are flattened::

        Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]

    - Instantiating an annotated type is equivalent to instantiating the
      underlying type::

        Annotated[C, Ann1](5) == C(5)

    - Annotated can be used as a generic type alias::

        Optimized = Annotated[T, runtime.Optimize()]
        Optimized[int] == Annotated[int, runtime.Optimize()]

        OptimizedList = Annotated[List[T], runtime.Optimize()]
        OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
    """
    __slots__ = ()
    def __new__(cls, *args, **kwargs):
        raise TypeError("Type Annotated cannot be instantiated.")
    @_tp_cache
    def __class_getitem__(cls, params):
        if not isinstance(params, tuple) or len(params) < 2:
            raise TypeError("Annotated[...] should be used "
                            "with at least two arguments (a type and an "
                            "annotation).")
        msg = "Annotated[t, ...]: t must be a type."
        # Only the first argument is validated; the metadata is opaque and
        # kept in the given order.
        origin = _type_check(params[0], msg)
        metadata = tuple(params[1:])
        return _AnnotatedAlias(origin, metadata)
    def __init_subclass__(cls, *args, **kwargs):
        raise TypeError(
            "Cannot subclass {}.Annotated".format(cls.__module__)
        )
def runtime_checkable(cls):
    """Mark a protocol class as a runtime protocol.

    Such a protocol can then be used with isinstance() and issubclass();
    applying the decorator to a non-protocol class raises TypeError.

    The resulting check is purely structural -- very similar to the "one
    trick pony" ABCs in collections.abc such as Iterable -- and verifies
    only the *presence* of the required methods, never their type
    signatures!  For example::

        @runtime_checkable
        class Closable(Protocol):
            def close(self): ...

        assert isinstance(open('/some/file'), Closable)
    """
    if not (issubclass(cls, Generic) and cls._is_protocol):
        raise TypeError('@runtime_checkable can be only applied to protocol classes,'
                        ' got %r' % cls)
    cls._is_runtime_protocol = True
    return cls
def cast(typ, val):
    """Cast a value to a type.

    At runtime this is a deliberate no-op that returns *val* unchanged --
    nothing is verified, so it stays as fast as possible.  Its only
    purpose is to signal to a static type checker that the value has the
    designated type.
    """
    return val
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
try:
code = func.__code__
except AttributeError:
# Some built-in functions don't have __code__, __defaults__, etc.
return {}
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
# Callable/module object kinds that carry annotations get_type_hints() can
# resolve.  NOTE(review): the consuming check lies below this window --
# confirm against the rest of get_type_hints().
_allowed_types = (types.FunctionType, types.BuiltinFunctionType,
                  types.MethodType, types.ModuleType,
                  WrapperDescriptorType, MethodWrapperType, MethodDescriptorType)
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
    """Return type hints for an object.

    This is often the same as obj.__annotations__, but it handles
    forward references encoded as string literals, adds Optional[t] if a
    default value equal to None is set and recursively replaces all
    'Annotated[T, ...]' with 'T' (unless 'include_extras=True').

    The argument may be a module, class, method, or function. The annotations
    are returned as a dictionary. For classes, annotations include also
    inherited members.

    TypeError is raised if the argument is not of a type that can contain
    annotations, and an empty dictionary is returned if no annotations are
    present.

    BEWARE -- the behavior of globalns and localns is counterintuitive
    (unless you are familiar with how eval() and exec() work).  The
    search order is locals first, then globals.

    - If no dict arguments are passed, an attempt is made to use the
      globals from obj (or the respective module's globals for classes),
      and these are also used as the locals.  If the object does not appear
      to have globals, an empty dictionary is used.  For classes, the search
      order is globals first then locals.

    - If one dict argument is passed, it is used for both globals and
      locals.

    - If two dict arguments are passed, they specify globals and
      locals, respectively.
    """
    # @no_type_check marks the object as carrying no usable hints.
    if getattr(obj, '__no_type_check__', None):
        return {}
    # Classes require a special treatment.
    if isinstance(obj, type):
        hints = {}
        # Walk the MRO base-first so subclasses override inherited hints.
        for base in reversed(obj.__mro__):
            if globalns is None:
                base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
            else:
                base_globals = globalns
            ann = base.__dict__.get('__annotations__', {})
            if isinstance(ann, types.GetSetDescriptorType):
                # `type` itself exposes __annotations__ as a descriptor.
                ann = {}
            base_locals = dict(vars(base)) if localns is None else localns
            if localns is None and globalns is None:
                # This is surprising, but required.  Before Python 3.10,
                # get_type_hints only evaluated the globalns of
                # a class. To maintain backwards compatibility, we reverse
                # the globalns and localns order so that eval() looks into
                # *base_globals* first rather than *base_locals*.
                # This only affects ForwardRefs.
                base_globals, base_locals = base_locals, base_globals
            for name, value in ann.items():
                if value is None:
                    # A bare None annotation means NoneType.
                    value = type(None)
                if isinstance(value, str):
                    value = ForwardRef(value, is_argument=False, is_class=True)
                value = _eval_type(value, base_globals, base_locals)
                hints[name] = value
        return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}

    if globalns is None:
        if isinstance(obj, types.ModuleType):
            globalns = obj.__dict__
        else:
            nsobj = obj
            # Find globalns for the unwrapped object.
            while hasattr(nsobj, '__wrapped__'):
                nsobj = nsobj.__wrapped__
            globalns = getattr(nsobj, '__globals__', {})
        if localns is None:
            localns = globalns
    elif localns is None:
        localns = globalns
    hints = getattr(obj, '__annotations__', None)
    if hints is None:
        # Return empty annotations for something that _could_ have them.
        if isinstance(obj, _allowed_types):
            return {}
        else:
            raise TypeError('{!r} is not a module, class, method, '
                            'or function.'.format(obj))
    defaults = _get_defaults(obj)
    hints = dict(hints)
    for name, value in hints.items():
        if value is None:
            value = type(None)
        if isinstance(value, str):
            # class-level forward refs were handled above, this must be either
            # a module-level annotation or a function argument annotation
            value = ForwardRef(
                value,
                is_argument=not isinstance(obj, types.ModuleType),
                is_class=False,
            )
        value = _eval_type(value, globalns, localns)
        if name in defaults and defaults[name] is None:
            # A default of None implicitly makes the parameter Optional.
            value = Optional[value]
        hints[name] = value
    return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
def _strip_annotations(t):
"""Strips the annotations from a given type.
"""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, _GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if isinstance(t, GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return GenericAlias(t.__origin__, stripped_args)
if isinstance(t, types.UnionType):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
def get_origin(tp):
    """Get the unsubscripted version of a type.

    This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
    and Annotated. Return None for unsupported types. Examples::

        get_origin(Literal[42]) is Literal
        get_origin(int) is None
        get_origin(ClassVar[int]) is ClassVar
        get_origin(Generic) is Generic
        get_origin(Generic[T]) is Generic
        get_origin(Union[T, int]) is Union
        get_origin(List[Tuple[T, T]][int]) == list
        get_origin(P.args) is P
    """
    if isinstance(tp, _AnnotatedAlias):
        # Annotated[...] reports the special form itself, not the wrapped type.
        return Annotated
    if isinstance(tp, (_BaseGenericAlias, GenericAlias,
                       ParamSpecArgs, ParamSpecKwargs)):
        return tp.__origin__
    if tp is Generic:
        # Bare Generic has no __origin__; it is its own origin.
        return Generic
    if isinstance(tp, types.UnionType):
        # X | Y unions report the types.UnionType class.
        return types.UnionType
    return None
def get_args(tp):
    """Get type arguments with all substitutions performed.

    For unions, basic simplifications used by Union constructor are performed.
    Examples::

        get_args(Dict[str, int]) == (str, int)
        get_args(int) == ()
        get_args(Union[int, Union[T, int], str][int]) == (int, str)
        get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
        get_args(Callable[[], T][int]) == ([], int)
    """
    if isinstance(tp, _AnnotatedAlias):
        # Annotated: wrapped type first, then the metadata items.
        return (tp.__origin__,) + tp.__metadata__
    if isinstance(tp, (_GenericAlias, GenericAlias)):
        res = tp.__args__
        if (tp.__origin__ is collections.abc.Callable
                and not (len(res) == 2 and _is_param_expr(res[0]))):
            # Callable[[a, b], r] presents its parameter types as a list.
            res = (list(res[:-1]), res[-1])
        return res
    if isinstance(tp, types.UnionType):
        return tp.__args__
    return ()
def is_typeddict(tp):
    """Check if an annotation is a TypedDict class

    For example::

        class Film(TypedDict):
            title: str
            year: int

        is_typeddict(Film)  # => True
        is_typeddict(Union[list, str])  # => False
    """
    # Every class produced by TypedDict (either syntax) uses this metaclass.
    return isinstance(tp, _TypedDictMeta)
def no_type_check(arg):
    """Decorator to indicate that annotations are not type hints.

    The argument must be a class or function; if it is a class, it
    applies recursively to all methods and classes defined in that class
    (but not to methods defined in its superclasses or subclasses).

    This mutates the function(s) or class(es) in place.
    """
    if isinstance(arg, type):
        arg_attrs = arg.__dict__.copy()
        # Drop attributes that refer back to the class itself or to one of
        # its bases -- recursing into those would loop forever.
        for attr, val in arg.__dict__.items():
            if val in arg.__bases__ + (arg,):
                arg_attrs.pop(attr)
        for obj in arg_attrs.values():
            if isinstance(obj, types.FunctionType):
                obj.__no_type_check__ = True
            if isinstance(obj, type):
                # Recurse into nested classes.
                no_type_check(obj)
    try:
        arg.__no_type_check__ = True
    except TypeError:  # built-in classes
        pass
    return arg
def no_type_check_decorator(decorator):
    """Decorator to give another decorator the @no_type_check effect.

    The returned decorator behaves exactly like *decorator*, except that
    everything it produces is additionally passed through @no_type_check.
    """
    @functools.wraps(decorator)
    def wrapped_decorator(*args, **kwds):
        return no_type_check(decorator(*args, **kwds))
    return wrapped_decorator
# Shared stand-in returned by @overload; calling it is always an error.
def _overload_dummy(*args, **kwds):
    """Helper for @overload to raise when called."""
    raise NotImplementedError(
        "You should not call an overloaded function. "
        "A series of @overload-decorated functions "
        "outside a stub module should always be followed "
        "by an implementation that is not @overload-ed.")
def overload(func):
    """Decorator for overloaded functions/methods.

    In a stub file, place two or more stub definitions for the same
    function in a row, each decorated with @overload.  For example::

      @overload
      def utf8(value: None) -> None: ...
      @overload
      def utf8(value: bytes) -> bytes: ...
      @overload
      def utf8(value: str) -> bytes: ...

    In a non-stub file (i.e. a regular .py file), do the same but
    follow it with an implementation.  The implementation should *not*
    be decorated with @overload.  For example::

      @overload
      def utf8(value: None) -> None: ...
      @overload
      def utf8(value: bytes) -> bytes: ...
      @overload
      def utf8(value: str) -> bytes: ...
      def utf8(value):
          # implementation goes here
    """
    # The decorated stub is discarded; only the final undecorated
    # implementation remains callable.
    return _overload_dummy
def final(f):
    """A decorator to indicate final methods and final classes.

    Use this decorator to indicate to type checkers that the decorated
    method cannot be overridden, and decorated class cannot be subclassed.
    For example::

      class Base:
          @final
          def done(self) -> None:
              ...
      class Sub(Base):
          def done(self) -> None:  # Error reported by type checker
              ...

      @final
      class Leaf:
          ...
      class Other(Leaf):  # Error reported by type checker
          ...

    There is no runtime checking of these properties.
    """
    # Identity at runtime; the restriction is enforced only by checkers.
    return f
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# Internal type variable used for Type[].
CT_co = TypeVar('CT_co', covariant=True, bound=type)
# A useful type variable with constraints. This represents string types.
# (This one *is* for export!)
AnyStr = TypeVar('AnyStr', bytes, str)
# Various ABCs mimicking those in collections.abc.
_alias = _SpecialGenericAlias
Hashable = _alias(collections.abc.Hashable, 0) # Not generic.
Awaitable = _alias(collections.abc.Awaitable, 1)
Coroutine = _alias(collections.abc.Coroutine, 3)
AsyncIterable = _alias(collections.abc.AsyncIterable, 1)
AsyncIterator = _alias(collections.abc.AsyncIterator, 1)
Iterable = _alias(collections.abc.Iterable, 1)
Iterator = _alias(collections.abc.Iterator, 1)
Reversible = _alias(collections.abc.Reversible, 1)
Sized = _alias(collections.abc.Sized, 0) # Not generic.
Container = _alias(collections.abc.Container, 1)
Collection = _alias(collections.abc.Collection, 1)
Callable = _CallableType(collections.abc.Callable, 2)
Callable.__doc__ = \
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types or ellipsis; the return type must be a single type.
There is no syntax to indicate optional or keyword arguments,
such function types are rarely used as callback types.
"""
AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet')
MutableSet = _alias(collections.abc.MutableSet, 1)
# NOTE: Mapping is only covariant in the value type.
Mapping = _alias(collections.abc.Mapping, 2)
MutableMapping = _alias(collections.abc.MutableMapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
MutableSequence = _alias(collections.abc.MutableSequence, 1)
ByteString = _alias(collections.abc.ByteString, 0) # Not generic
# Tuple accepts variable number of parameters.
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
List = _alias(list, 1, inst=False, name='List')
Deque = _alias(collections.deque, 1, name='Deque')
Set = _alias(set, 1, inst=False, name='Set')
FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet')
MappingView = _alias(collections.abc.MappingView, 1)
KeysView = _alias(collections.abc.KeysView, 1)
ItemsView = _alias(collections.abc.ItemsView, 2)
ValuesView = _alias(collections.abc.ValuesView, 1)
ContextManager = _alias(contextlib.AbstractContextManager, 1, name='ContextManager')
AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name='AsyncContextManager')
Dict = _alias(dict, 2, inst=False, name='Dict')
DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict')
OrderedDict = _alias(collections.OrderedDict, 2)
Counter = _alias(collections.Counter, 1)
ChainMap = _alias(collections.ChainMap, 2)
Generator = _alias(collections.abc.Generator, 3)
AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
@runtime_checkable
class SupportsInt(Protocol):
    """An ABC with one abstract method __int__."""
    __slots__ = ()

    @abstractmethod
    def __int__(self) -> int:
        pass
@runtime_checkable
class SupportsFloat(Protocol):
    """An ABC with one abstract method __float__."""
    __slots__ = ()

    @abstractmethod
    def __float__(self) -> float:
        pass
@runtime_checkable
class SupportsComplex(Protocol):
    """An ABC with one abstract method __complex__."""
    __slots__ = ()

    @abstractmethod
    def __complex__(self) -> complex:
        pass
@runtime_checkable
class SupportsBytes(Protocol):
    """An ABC with one abstract method __bytes__."""
    __slots__ = ()

    @abstractmethod
    def __bytes__(self) -> bytes:
        pass
@runtime_checkable
class SupportsIndex(Protocol):
    """An ABC with one abstract method __index__."""
    __slots__ = ()

    @abstractmethod
    def __index__(self) -> int:
        pass
@runtime_checkable
class SupportsAbs(Protocol[T_co]):
    """An ABC with one abstract method __abs__ that is covariant in its return type."""
    __slots__ = ()

    @abstractmethod
    def __abs__(self) -> T_co:
        pass
@runtime_checkable
class SupportsRound(Protocol[T_co]):
    """An ABC with one abstract method __round__ that is covariant in its return type."""
    __slots__ = ()

    @abstractmethod
    def __round__(self, ndigits: int = 0) -> T_co:
        pass
def _make_nmtuple(name, types, module, defaults = ()):
fields = [n for n, t in types]
types = {n: _type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = types
return nm_tpl
# attributes prohibited to set in NamedTuple class syntax
# (they would clobber the namedtuple machinery itself)
_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__',
                         '_fields', '_field_defaults',
                         '_make', '_replace', '_asdict', '_source'})

# attributes that are always present in a class namespace and must simply
# be ignored when copying user attributes onto the generated namedtuple
_special = frozenset({'__module__', '__name__', '__annotations__'})
class NamedTupleMeta(type):
    """Metaclass that converts a NamedTuple class body into a real namedtuple."""

    def __new__(cls, typename, bases, ns):
        # NamedTuple.__mro_entries__ guarantees the only base is _NamedTuple.
        assert bases[0] is _NamedTuple
        types = ns.get('__annotations__', {})
        default_names = []
        for field_name in types:
            if field_name in ns:
                default_names.append(field_name)
            elif default_names:
                # Same restriction as ordinary function signatures: a field
                # without a default may not follow fields that have one.
                raise TypeError(f"Non-default namedtuple field {field_name} "
                                f"cannot follow default field"
                                f"{'s' if len(default_names) > 1 else ''} "
                                f"{', '.join(default_names)}")
        nm_tpl = _make_nmtuple(typename, types.items(),
                               defaults=[ns[n] for n in default_names],
                               module=ns['__module__'])
        # update from user namespace without overriding special namedtuple attributes
        for key in ns:
            if key in _prohibited:
                raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
            elif key not in _special and key not in nm_tpl._fields:
                setattr(nm_tpl, key, ns[key])
        return nm_tpl
def NamedTuple(typename, fields=None, /, **kwargs):
    """Typed version of namedtuple.

    Usage in Python versions >= 3.6::

        class Employee(NamedTuple):
            name: str
            id: int

    This is equivalent to::

        Employee = collections.namedtuple('Employee', ['name', 'id'])

    The resulting class has an extra __annotations__ attribute, giving a
    dict that maps field names to types.  (The field names are also in
    the _fields attribute, which is part of the namedtuple API.)
    Alternative equivalent keyword syntax is also accepted::

        Employee = NamedTuple('Employee', name=str, id=int)

    In Python versions <= 3.5 use::

        Employee = NamedTuple('Employee', [('name', str), ('id', int)])
    """
    if fields is None:
        # Keyword form: field names and types come from **kwargs.
        fields = kwargs.items()
    elif kwargs:
        raise TypeError("Either list of fields or keywords"
                        " can be provided to NamedTuple, not both")
    return _make_nmtuple(typename, fields, module=_caller())
# Sentinel base class: `class Foo(NamedTuple):` is rewritten (via
# __mro_entries__) to inherit from this, which triggers NamedTupleMeta.
_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {})

def _namedtuple_mro_entries(bases):
    if len(bases) > 1:
        raise TypeError("Multiple inheritance with NamedTuple is not supported")
    assert bases[0] is NamedTuple
    return (_NamedTuple,)

NamedTuple.__mro_entries__ = _namedtuple_mro_entries
class _TypedDictMeta(type):
    def __new__(cls, name, bases, ns, total=True):
        """Create new typed dict class object.

        This method is called when TypedDict is subclassed,
        or when TypedDict is instantiated. This way
        TypedDict supports all three syntax forms described in its docstring.
        Subclasses and instances of TypedDict return actual dictionaries.
        """
        for base in bases:
            if type(base) is not _TypedDictMeta:
                raise TypeError('cannot inherit from both a TypedDict type '
                                'and a non-TypedDict base class')
        # The declared bases are deliberately replaced with (dict,): instances
        # are plain dicts, and TypedDict bases contribute only their
        # annotations and required/optional key sets, merged below.
        tp_dict = type.__new__(_TypedDictMeta, name, (dict,), ns)

        annotations = {}
        own_annotations = ns.get('__annotations__', {})
        own_annotation_keys = set(own_annotations.keys())
        msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
        own_annotations = {
            n: _type_check(tp, msg, module=tp_dict.__module__)
            for n, tp in own_annotations.items()
        }
        required_keys = set()
        optional_keys = set()

        # Inherit annotations and key classifications from TypedDict bases.
        for base in bases:
            annotations.update(base.__dict__.get('__annotations__', {}))
            required_keys.update(base.__dict__.get('__required_keys__', ()))
            optional_keys.update(base.__dict__.get('__optional_keys__', ()))

        annotations.update(own_annotations)
        if total:
            required_keys.update(own_annotation_keys)
        else:
            optional_keys.update(own_annotation_keys)

        tp_dict.__annotations__ = annotations
        tp_dict.__required_keys__ = frozenset(required_keys)
        tp_dict.__optional_keys__ = frozenset(optional_keys)
        if not hasattr(tp_dict, '__total__'):
            tp_dict.__total__ = total
        return tp_dict

    __call__ = dict  # static method

    def __subclasscheck__(cls, other):
        # Typed dicts are only for static structural subtyping.
        raise TypeError('TypedDict does not support instance and class checks')

    __instancecheck__ = __subclasscheck__
def TypedDict(typename, fields=None, /, *, total=True, **kwargs):
    """A simple typed namespace. At runtime it is equivalent to a plain dict.

    TypedDict creates a dictionary type that expects all of its
    instances to have a certain set of keys, where each key is
    associated with a value of a consistent type. This expectation
    is not checked at runtime but is only enforced by type checkers.
    Usage::

        class Point2D(TypedDict):
            x: int
            y: int
            label: str

        a: Point2D = {'x': 1, 'y': 2, 'label': 'good'}  # OK
        b: Point2D = {'z': 3, 'label': 'bad'}           # Fails type check

        assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')

    The type info can be accessed via the Point2D.__annotations__ dict, and
    the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
    TypedDict supports two additional equivalent forms::

        Point2D = TypedDict('Point2D', x=int, y=int, label=str)
        Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})

    By default, all keys must be present in a TypedDict. It is possible
    to override this by specifying totality.
    Usage::

        class point2D(TypedDict, total=False):
            x: int
            y: int

    This means that a point2D TypedDict can have any of the keys omitted. A type
    checker is only expected to support a literal False or True as the value of
    the total argument. True is the default, and makes all items defined in the
    class body be required.

    The class syntax is only supported in Python 3.6+, while two other
    syntax forms work for Python 2.7 and 3.2+
    """
    if fields is None:
        fields = kwargs
    elif kwargs:
        raise TypeError("TypedDict takes either a dict or keyword arguments,"
                        " but not both")

    ns = {'__annotations__': dict(fields)}
    module = _caller()
    if module is not None:
        # Setting correct module is necessary to make typed dict classes pickleable.
        ns['__module__'] = module

    return _TypedDictMeta(typename, (), ns, total=total)
# Sentinel base class: `class Foo(TypedDict):` is rewritten (via
# __mro_entries__) to inherit from this, which triggers _TypedDictMeta.
_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
class NewType:
    """NewType creates simple unique types with almost zero
    runtime overhead. NewType(name, tp) is considered a subtype of tp
    by static type checkers. At runtime, NewType(name, tp) returns
    a dummy function that simply returns its argument. Usage::

        UserId = NewType('UserId', int)

        def name_by_id(user_id: UserId) -> str:
            ...

        UserId('user')          # Fails type check
        name_by_id(42)          # Fails type check
        name_by_id(UserId(42))  # OK

        num = UserId(5) + 1     # type: int
    """

    __call__ = _idfunc  # calling the NewType just returns its argument

    def __init__(self, name, tp):
        self.__qualname__ = name
        if '.' in name:
            # For dotted names ('Outer.Inner'), __name__ keeps only the last
            # component, mirroring normal class behaviour.
            name = name.rpartition('.')[-1]
        self.__name__ = name
        self.__supertype__ = tp
        def_mod = _caller()  # module that called NewType(), for repr/pickling
        if def_mod != 'typing':
            self.__module__ = def_mod

    def __repr__(self):
        return f'{self.__module__}.{self.__qualname__}'

    def __reduce__(self):
        # Pickle by qualified-name lookup in the defining module.
        return self.__qualname__

    def __or__(self, other):
        return Union[self, other]

    def __ror__(self, other):
        return Union[other, self]
# Python-version-specific alias (Python 2: unicode; Python 3: str)
Text = str


# Constant that's True when type checking, but False here (i.e. at runtime).
TYPE_CHECKING = False
class IO(Generic[AnyStr]):
    """Generic base class for TextIO and BinaryIO.

    This is an abstract, generic version of the return of open().

    NOTE: This does not distinguish between the different possible
    classes (text vs. binary, read vs. write vs. read/write,
    append-only, unbuffered).  The TextIO and BinaryIO subclasses
    below capture the distinctions between text vs. binary, which is
    pervasive in the interface; however we currently do not offer a
    way to track the other distinctions in the type system.
    """

    __slots__ = ()

    @property
    @abstractmethod
    def mode(self) -> str:
        pass

    @property
    @abstractmethod
    def name(self) -> str:
        pass

    @abstractmethod
    def close(self) -> None:
        pass

    @property
    @abstractmethod
    def closed(self) -> bool:
        pass

    @abstractmethod
    def fileno(self) -> int:
        pass

    @abstractmethod
    def flush(self) -> None:
        pass

    @abstractmethod
    def isatty(self) -> bool:
        pass

    @abstractmethod
    def read(self, n: int = -1) -> AnyStr:
        pass

    @abstractmethod
    def readable(self) -> bool:
        pass

    @abstractmethod
    def readline(self, limit: int = -1) -> AnyStr:
        pass

    @abstractmethod
    def readlines(self, hint: int = -1) -> List[AnyStr]:
        pass

    @abstractmethod
    def seek(self, offset: int, whence: int = 0) -> int:
        pass

    @abstractmethod
    def seekable(self) -> bool:
        pass

    @abstractmethod
    def tell(self) -> int:
        pass

    @abstractmethod
    def truncate(self, size: Optional[int] = None) -> int:
        pass

    @abstractmethod
    def writable(self) -> bool:
        pass

    @abstractmethod
    def write(self, s: AnyStr) -> int:
        pass

    @abstractmethod
    def writelines(self, lines: List[AnyStr]) -> None:
        pass

    @abstractmethod
    def __enter__(self) -> 'IO[AnyStr]':
        pass

    @abstractmethod
    def __exit__(self, type, value, traceback) -> None:
        pass
class BinaryIO(IO[bytes]):
    """Typed version of the return of open() in binary mode."""

    __slots__ = ()

    @abstractmethod
    def write(self, s: Union[bytes, bytearray]) -> int:
        pass

    @abstractmethod
    def __enter__(self) -> 'BinaryIO':
        pass
class TextIO(IO[str]):
    """Typed version of the return of open() in text mode."""

    __slots__ = ()

    @property
    @abstractmethod
    def buffer(self) -> BinaryIO:
        pass

    @property
    @abstractmethod
    def encoding(self) -> str:
        pass

    @property
    @abstractmethod
    def errors(self) -> Optional[str]:
        pass

    @property
    @abstractmethod
    def line_buffering(self) -> bool:
        pass

    @property
    @abstractmethod
    def newlines(self) -> Any:
        pass

    @abstractmethod
    def __enter__(self) -> 'TextIO':
        pass
class _DeprecatedType(type):
    """Metaclass that emits a DeprecationWarning when attributes of the
    deprecated ``typing.io`` / ``typing.re`` pseudo-modules are accessed."""

    def __getattribute__(cls, name):
        # __dict__ and __module__ are touched by interpreter machinery
        # (repr, pickling); warning on those would be pure noise, so only
        # names the class itself defines trigger the warning.
        if name not in ("__dict__", "__module__") and name in cls.__dict__:
            warnings.warn(
                f"{cls.__name__} is deprecated, import directly "
                f"from typing instead. {cls.__name__} will be removed "
                "in Python 3.12.",
                DeprecationWarning,
                stacklevel=2,
            )
        return super().__getattribute__(name)
class io(metaclass=_DeprecatedType):
    """Wrapper namespace for IO generic classes."""

    __all__ = ['IO', 'TextIO', 'BinaryIO']
    IO = IO
    TextIO = TextIO
    BinaryIO = BinaryIO


# Register the pseudo-module so `import typing.io` keeps working.
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
Pattern = _alias(stdlib_re.Pattern, 1)
Match = _alias(stdlib_re.Match, 1)

class re(metaclass=_DeprecatedType):
    """Wrapper namespace for re type aliases."""

    __all__ = ['Pattern', 'Match']
    Pattern = Pattern
    Match = Match


# Register the pseudo-module so `import typing.re` keeps working.
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
|
from flask import Flask

app = Flask(__name__)


@app.route("/")
def hello():
    """Root endpoint: returns a fixed greeting string."""
    return "Hello, Pavan..."
|
import os

print("Hello World. I am Python")

# Read the two addends from the environment; only compute when both are
# supplied so a bare run stays friendly instead of raising KeyError.
if "a" in os.environ and "b" in os.environ:
    firstnum = os.environ['a']
    secondnum = os.environ['b']
    # Renamed from `sum` -- that name shadowed the builtin sum().
    total = int(firstnum) + int(secondnum)
    print('Sum of {0} & {1} is {2}'.format(firstnum, secondnum, total))
else:
    print('No parameters passed to calculate the sum.')
print("Python Bye!")
|
import ast
import re
import time
from optparse import OptionParser

from beneficialtweets import train, predict
from utils import ProgressBarThread
# Command-line interface.  All options default to '' so their truthiness
# doubles as a "was this flag supplied?" test in __main__ below.
parser = OptionParser()
parser.add_option('--train', dest='train', help='Build the classifier from given dataset', default='')
parser.add_option('--predict', dest='predict', help='Classify tweet from given text or tweet_url', default='')
# Help-text typos fixed: "acording" -> "according", "writting" -> "writing".
parser.add_option('--query_tweets', dest='query_tweets', help='Fetch all tweets according to given scrape parameter.\nSend scrape parameter by writing a string that can be converted into dictionary.\nA list of acceptable dictionary keys : ["query_string", "limit", "lang", "poolsize", "file_name", "save"]\n', default='')
parser.add_option('-p', '--pos_tweets', dest='pos_path', help='Path to positive tweets dataset, required when train = true', default='')
parser.add_option('-n', '--neg_tweets', dest='neg_path', help='Path to negative tweets dataset, required when train = true', default='')
parser.add_option('-r', '--ratio', dest='ratio', help='Train test split ratio, optional when train = true', default='')
parser.add_option('-a', '--algorithms', dest='algorithms', help='Classifier algorithm ("all", "tf", "svm"), required when train = true', default='')
parser.add_option('-m', '--model', dest='model', help='Classifier model load path, optional; if not specified the default model (random forest) will be used', default='')
if __name__ == '__main__':
    progress_bar = ProgressBarThread('Computing')
    (options, args) = parser.parse_args()
    if options.train or options.predict:
        if options.train:
            # --- training mode -------------------------------------------
            if not (options.pos_path and options.neg_path):
                raise SyntaxError("Required both of positive and negative datasets")
            # Positional args for train(): datasets, split ratio, algorithm.
            args = [options.pos_path, options.neg_path]
            args.append(float(options.ratio) if options.ratio else .3)
            args.append(options.algorithms
                        if options.algorithms in ('all', 'svm', 'tf') else 'all')
            progress_bar.start()
            train(*args)
            progress_bar.stop()
        elif options.predict:
            # --- prediction mode -----------------------------------------
            # Accept either raw tweet text or a tweet URL.  Status ids are
            # variable-length decimals (older ids are shorter than 19 digits),
            # so match [0-9]+ rather than a fixed width.
            tweet_url_matcher = re.compile(
                r'^(http://|https://|https://www\.|http://www\.)'
                r'twitter\.com/.+/status/[0-9]+(/|)$').match
            if tweet_url_matcher(options.predict):
                from scraper import tweet
                tweet_text = tweet.scrape(options.predict)
            else:
                tweet_text = options.predict
            progress_bar.start()
            if options.model:
                pred_class = predict(tweet_text, options.model)
            else:
                pred_class = predict(tweet_text)
            time.sleep(.1)
            print(f'\nText : {tweet_text}\nClass : {pred_class}')
            progress_bar.stop()
    elif options.query_tweets:
        # --- scraping mode ---------------------------------------------
        from scraper import tweets
        # SECURITY: parse the user-supplied dict with ast.literal_eval, never
        # eval() -- eval() would execute arbitrary code from the command line.
        try:
            args_dict = ast.literal_eval(options.query_tweets)
        except (ValueError, SyntaxError):
            raise ValueError('query_tweets parameter must be a string in dictionary form') from None
        if not isinstance(args_dict, dict):
            raise ValueError('query_tweets parameter must be a string in dictionary form')
        # Fill in any missing keys, in the positional order tweets.scrape()
        # expects its arguments.
        scrape_defaults = (
            ('query_string', '(#COVID19,#COVID-19,#COVID_19)'),
            ('limit', 1),
            ('lang', 'th'),
            ('poolsize', 1),
            ('file_name', 'covid19_raw_dataset'),
            ('save', True),
        )
        args = [args_dict.get(key, default) for key, default in scrape_defaults]
        progress_bar.start()
        tweets.scrape(*args)
        progress_bar.stop()
    else:
        raise SyntaxError("Required train, predict or query_tweets argument")
|
import abc
from enum import Enum
from typing import Optional
from bionorm.common.models.util import Location
class BioEntityType(Enum):
    """Categories of biomedical entity mention handled by the models."""
    GENE = 'GENE'
    SPECIES = 'SPECIES'
    DISEASE = 'DISEASE'
    CHEMICAL = 'CHEMICAL'
class BioEntity(abc.ABC):
    """A single entity mention in text: its location, its surface form, and
    (once normalization has run) an optional identifier."""

    def __init__(self, location: Location, text: str):
        self.location = location  # span of the mention in the source document
        self.text = text  # surface text of the mention
        # Normalized identifier; stays None until a normalizer assigns one.
        self.id: Optional[str] = None

    def __str__(self):
        return f'{self.location}\t{self.text}\t{self.id}'

    def __repr__(self):
        return str(self)

    @property
    @abc.abstractmethod
    def e_type(self) -> BioEntityType:
        """The entity category of this mention."""
        pass
class SpeciesMention(BioEntity):
    """Mention of a species name."""

    @property
    def e_type(self) -> BioEntityType:
        return BioEntityType.SPECIES
class GeneMention(BioEntity):
    """Mention of a gene name."""

    @property
    def e_type(self) -> BioEntityType:
        return BioEntityType.GENE
class DiseaseMention(BioEntity):
    """Mention of a disease name."""

    @property
    def e_type(self) -> BioEntityType:
        return BioEntityType.DISEASE
class ChemicalMention(BioEntity):
    """Mention of a chemical name."""

    @property
    def e_type(self) -> BioEntityType:
        return BioEntityType.CHEMICAL
|
# coding: utf-8
from __future__ import unicode_literals
import calendar
import copy
import datetime
import functools
import hashlib
import itertools
import json
import math
import os.path
import random
import re
import sys
import time
import traceback
import threading
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_chr,
compat_HTTPError,
compat_parse_qs,
compat_str,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..jsinterp import JSInterpreter
from ..utils import (
bug_reports_message,
clean_html,
datetime_from_str,
dict_get,
error_to_compat_str,
ExtractorError,
float_or_none,
format_field,
get_first,
int_or_none,
is_html,
join_nonempty,
js_to_json,
mimetype2ext,
network_exceptions,
NO_DEFAULT,
orderedSet,
parse_codecs,
parse_count,
parse_duration,
parse_iso8601,
parse_qs,
qualities,
remove_end,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
strftime_or_none,
traverse_obj,
try_get,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
update_url_query,
url_or_none,
urljoin,
variadic,
)
# any clients starting with _ cannot be explicitly requested by the user
# Per-client innertube configuration; build_innertube_clients() below fills in
# defaults (API key, host, JS-player requirement, 'hl') and derives variants.
INNERTUBE_CLIENTS = {
    'web': {
        'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'WEB',
                'clientVersion': '2.20211221.00.00',
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 1
    },
    'web_embedded': {
        'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'WEB_EMBEDDED_PLAYER',
                'clientVersion': '1.20211215.00.01',
            },
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 56
    },
    'web_music': {
        'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
        'INNERTUBE_HOST': 'music.youtube.com',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'WEB_REMIX',
                'clientVersion': '1.20211213.00.00',
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
    },
    'web_creator': {
        'INNERTUBE_API_KEY': 'AIzaSyBUPetSUmoZL-OhlxA7wSac5XinrygCqMo',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'WEB_CREATOR',
                'clientVersion': '1.20211220.02.00',
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
    },
    'android': {
        'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'ANDROID',
                'clientVersion': '16.49',
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
        'REQUIRE_JS_PLAYER': False
    },
    'android_embedded': {
        'INNERTUBE_API_KEY': 'AIzaSyCjc_pVEDi4qsv5MtC2dMXzpIaDoRFLsxw',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'ANDROID_EMBEDDED_PLAYER',
                'clientVersion': '16.49',
            },
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
        'REQUIRE_JS_PLAYER': False
    },
    'android_music': {
        'INNERTUBE_API_KEY': 'AIzaSyAOghZGza2MQSZkY_zfZ370N-PUdXEo8AI',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'ANDROID_MUSIC',
                'clientVersion': '4.57',
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
        'REQUIRE_JS_PLAYER': False
    },
    'android_creator': {
        'INNERTUBE_API_KEY': 'AIzaSyD_qjV8zaaUMehtLkrKFgVeSX_Iqbtyws8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'ANDROID_CREATOR',
                'clientVersion': '21.47',
            },
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
        'REQUIRE_JS_PLAYER': False
    },
    # iOS clients have HLS live streams. Setting device model to get 60fps formats.
    # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680#issuecomment-1002724558
    'ios': {
        'INNERTUBE_API_KEY': 'AIzaSyB-63vPrdThhKuerbB2N_l7Kwwcxj6yUAc',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'IOS',
                'clientVersion': '16.46',
                'deviceModel': 'iPhone14,3',
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
        'REQUIRE_JS_PLAYER': False
    },
    'ios_embedded': {
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'IOS_MESSAGES_EXTENSION',
                'clientVersion': '16.46',
                'deviceModel': 'iPhone14,3',
            },
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
        'REQUIRE_JS_PLAYER': False
    },
    'ios_music': {
        'INNERTUBE_API_KEY': 'AIzaSyBAETezhkwP0ZWA02RsqT1zu78Fpt0bC_s',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'IOS_MUSIC',
                'clientVersion': '4.57',
            },
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
        'REQUIRE_JS_PLAYER': False
    },
    'ios_creator': {
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'IOS_CREATOR',
                'clientVersion': '21.47',
            },
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
        'REQUIRE_JS_PLAYER': False
    },
    # mweb has 'ultralow' formats
    # See: https://github.com/yt-dlp/yt-dlp/pull/557
    'mweb': {
        'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'MWEB',
                'clientVersion': '2.20211221.01.00',
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 2
    },
    # This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
    # See: https://github.com/zerodytrash/YouTube-Internal-Clients
    'tv_embedded': {
        'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
                'clientVersion': '2.0',
            },
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 85
    },
}
def _split_innertube_client(client_name):
variant, *base = client_name.rsplit('.', 1)
if base:
return variant, base[0], variant
base, *variant = client_name.split('_', 1)
return client_name, base, variant[0] if variant else None
def build_innertube_clients():
    """Fill in defaults for every INNERTUBE_CLIENTS entry (in place) and derive
    '<base>_embedscreen' variants for each base client."""
    # Third-party context attached to embedded-player variants.
    THIRD_PARTY = {
        'embedUrl': 'https://www.youtube.com/',  # Can be any valid URL
    }
    BASE_CLIENTS = ('android', 'web', 'tv', 'ios', 'mweb')
    # Reversed so earlier entries in BASE_CLIENTS get higher priority values.
    priority = qualities(BASE_CLIENTS[::-1])

    # tuple(...) snapshot: the loop adds new '_embedscreen' entries to the dict.
    for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()):
        ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8')
        ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
        ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
        ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')

        _, base_client, variant = _split_innertube_client(client)
        ytcfg['priority'] = 10 * priority(base_client)

        if not variant:
            # Base client: derive an embedded-screen variant with lower priority.
            INNERTUBE_CLIENTS[f'{client}_embedscreen'] = embedscreen = copy.deepcopy(ytcfg)
            embedscreen['INNERTUBE_CONTEXT']['client']['clientScreen'] = 'EMBED'
            embedscreen['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
            embedscreen['priority'] -= 3
        elif variant == 'embedded':
            ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
            ytcfg['priority'] -= 2
        else:
            ytcfg['priority'] -= 3


# Populate defaults and derived variants once, at import time.
build_innertube_clients()
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""

    # URL path components that can never be a channel/user handle.
    _RESERVED_NAMES = (
        r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
        r'shorts|movies|results|search|shared|hashtag|trending|explore|feed|feeds|'
        r'browse|oembed|get_video_info|iframe_api|s/player|'
        r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')

    # Matches playlist IDs (prefixed alphanumeric IDs plus special lists like WL).
    _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'

    # _NETRC_MACHINE = 'youtube'

    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    _INVIDIOUS_SITES = (
        # invidious-redirect websites
        r'(?:www\.)?redirect\.invidious\.io',
        r'(?:(?:www|dev)\.)?invidio\.us',
        # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
        r'(?:www\.)?invidious\.pussthecat\.org',
        r'(?:www\.)?invidious\.zee\.li',
        r'(?:www\.)?invidious\.ethibox\.fr',
        r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
        r'(?:www\.)?osbivz6guyeahrwp2lnwyjk2xos342h4ocsxyqrlaopqjuhwn2djiiyd\.onion',
        r'(?:www\.)?u2cvlit75owumwpy4dj2hsmvkq7nvrclkpht7xgyye2pyoxhpmclkrad\.onion',
        # youtube-dl invidious instances list
        r'(?:(?:www|no)\.)?invidiou\.sh',
        r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
        r'(?:www\.)?invidious\.kabi\.tk',
        r'(?:www\.)?invidious\.mastodon\.host',
        r'(?:www\.)?invidious\.zapashcanon\.fr',
        r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
        r'(?:www\.)?invidious\.tinfoil-hat\.net',
        r'(?:www\.)?invidious\.himiko\.cloud',
        r'(?:www\.)?invidious\.reallyancient\.tech',
        r'(?:www\.)?invidious\.tube',
        r'(?:www\.)?invidiou\.site',
        r'(?:www\.)?invidious\.site',
        r'(?:www\.)?invidious\.xyz',
        r'(?:www\.)?invidious\.nixnet\.xyz',
        r'(?:www\.)?invidious\.048596\.xyz',
        r'(?:www\.)?invidious\.drycat\.fr',
        r'(?:www\.)?inv\.skyn3t\.in',
        r'(?:www\.)?tube\.poal\.co',
        r'(?:www\.)?tube\.connect\.cafe',
        r'(?:www\.)?vid\.wxzm\.sx',
        r'(?:www\.)?vid\.mint\.lgbt',
        r'(?:www\.)?vid\.puffyan\.us',
        r'(?:www\.)?yewtu\.be',
        r'(?:www\.)?yt\.elukerio\.org',
        r'(?:www\.)?yt\.lelux\.fi',
        r'(?:www\.)?invidious\.ggc-project\.de',
        r'(?:www\.)?yt\.maisputain\.ovh',
        r'(?:www\.)?ytprivate\.com',
        r'(?:www\.)?invidious\.13ad\.de',
        r'(?:www\.)?invidious\.toot\.koeln',
        r'(?:www\.)?invidious\.fdn\.fr',
        r'(?:www\.)?watch\.nettohikari\.com',
        r'(?:www\.)?invidious\.namazso\.eu',
        r'(?:www\.)?invidious\.silkky\.cloud',
        r'(?:www\.)?invidious\.exonip\.de',
        r'(?:www\.)?invidious\.riverside\.rocks',
        r'(?:www\.)?invidious\.blamefran\.net',
        r'(?:www\.)?invidious\.moomoo\.de',
        r'(?:www\.)?ytb\.trom\.tf',
        r'(?:www\.)?yt\.cyberhost\.uk',
        r'(?:www\.)?kgg2m7yk5aybusll\.onion',
        r'(?:www\.)?qklhadlycap4cnod\.onion',
        r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
        r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
        r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
        r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
        r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
        r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
        r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
        r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
        r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
        r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
    )
    def _initialize_consent(self):
        """Set a CONSENT cookie so EU consent interstitials don't block requests."""
        cookies = self._get_cookies('https://www.youtube.com/')
        # Logged-in sessions (__Secure-3PSID present) don't need the consent cookie.
        if cookies.get('__Secure-3PSID'):
            return
        consent_id = None
        consent = cookies.get('CONSENT')
        if consent:
            if 'YES' in consent.value:
                return
            # Reuse the pending consent id if one was already assigned.
            consent_id = self._search_regex(
                r'PENDING\+(\d+)', consent.value, 'consent', default=None)
        if not consent_id:
            consent_id = random.randint(100, 999)
        self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)

    def _initialize_pref(self):
        """Force the PREF cookie to English UI and UTC so responses parse consistently."""
        cookies = self._get_cookies('https://www.youtube.com/')
        pref_cookie = cookies.get('PREF')
        pref = {}
        if pref_cookie:
            try:
                pref = dict(compat_urlparse.parse_qsl(pref_cookie.value))
            except ValueError:
                self.report_warning('Failed to parse user PREF cookie' + bug_reports_message())
        # Override language/timezone while keeping the user's other preferences.
        pref.update({'hl': 'en', 'tz': 'UTC'})
        self._set_cookie('.youtube.com', name='PREF', value=compat_urllib_parse_urlencode(pref))

    def _real_initialize(self):
        # Prepare cookies before any extraction request is made.
        self._initialize_pref()
        self._initialize_consent()
        if (self._LOGIN_REQUIRED
                and self.get_param('cookiefile') is None
                and self.get_param('cookiesfrombrowser') is None):
            self.raise_login_required('Login details are needed to download this content', method='cookies')
    # Regexes locating the large JSON blobs embedded in watch/browse pages.
    _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
    _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
    _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'

    def _get_default_ytcfg(self, client='web'):
        # Deep copy so callers may mutate the returned config freely.
        return copy.deepcopy(INNERTUBE_CLIENTS[client])

    def _get_innertube_host(self, client='web'):
        """API hostname for *client* (e.g. music.youtube.com for music clients)."""
        return INNERTUBE_CLIENTS[client]['INNERTUBE_HOST']

    def _ytcfg_get_safe(self, ytcfg, getter, expected_type=None, default_client='web'):
        # try_get but with fallback to default ytcfg client values when present
        _func = lambda y: try_get(y, getter, expected_type)
        return _func(ytcfg) or _func(self._get_default_ytcfg(default_client))

    def _extract_client_name(self, ytcfg, default_client='web'):
        # Prefer the flat key, then the nested context value.
        return self._ytcfg_get_safe(
            ytcfg, (lambda x: x['INNERTUBE_CLIENT_NAME'],
                    lambda x: x['INNERTUBE_CONTEXT']['client']['clientName']), compat_str, default_client)

    def _extract_client_version(self, ytcfg, default_client='web'):
        return self._ytcfg_get_safe(
            ytcfg, (lambda x: x['INNERTUBE_CLIENT_VERSION'],
                    lambda x: x['INNERTUBE_CONTEXT']['client']['clientVersion']), compat_str, default_client)

    def _extract_api_key(self, ytcfg=None, default_client='web'):
        return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str, default_client)

    def _extract_context(self, ytcfg=None, default_client='web'):
        """Innertube request context from *ytcfg*, falling back to the client defaults."""
        context = get_first(
            (ytcfg, self._get_default_ytcfg(default_client)), 'INNERTUBE_CONTEXT', expected_type=dict)
        # Enforce language and tz for extraction
        client_context = traverse_obj(context, 'client', expected_type=dict, default={})
        client_context.update({'hl': 'en', 'timeZone': 'UTC', 'utcOffsetMinutes': 0})
        return context
    # Cached SAPISID cookie value; False once we have determined none is available.
    _SAPISID = None

    def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
        """Build the SAPISIDHASH Authorization header for authenticated requests,
        or return None when no SAPISID/__Secure-3PAPISID cookie is available."""
        time_now = round(time.time())
        if self._SAPISID is None:
            yt_cookies = self._get_cookies('https://www.youtube.com')
            # Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is.
            # See: https://github.com/yt-dlp/yt-dlp/issues/393
            sapisid_cookie = dict_get(
                yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
            if sapisid_cookie and sapisid_cookie.value:
                self._SAPISID = sapisid_cookie.value
                self.write_debug('Extracted SAPISID cookie')
                # SAPISID cookie is required if not already present
                if not yt_cookies.get('SAPISID'):
                    self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie')
                    self._set_cookie(
                        '.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600)
            else:
                self._SAPISID = False
        if not self._SAPISID:
            return None
        # SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
        sapisidhash = hashlib.sha1(
            f'{time_now} {self._SAPISID} {origin}'.encode('utf-8')).hexdigest()
        return f'SAPISIDHASH {time_now}_{sapisidhash}'
    def _call_api(self, ep, query, video_id, fatal=True, headers=None,
                  note='Downloading API JSON', errnote='Unable to download API page',
                  context=None, api_key=None, api_hostname=None, default_client='web'):
        """POST a JSON payload to the innertube endpoint *ep* and return the parsed response."""
        data = {'context': context} if context else {'context': self._extract_context(default_client=default_client)}
        data.update(query)
        real_headers = self.generate_api_headers(default_client=default_client)
        real_headers.update({'content-type': 'application/json'})
        if headers:
            real_headers.update(headers)
        return self._download_json(
            'https://%s/youtubei/v1/%s' % (api_hostname or self._get_innertube_host(default_client), ep),
            video_id=video_id, fatal=fatal, note=note, errnote=errnote,
            data=json.dumps(data).encode('utf8'), headers=real_headers,
            query={'key': api_key or self._extract_api_key(), 'prettyPrint': 'false'})

    def extract_yt_initial_data(self, item_id, webpage, fatal=True):
        """Locate and parse the ytInitialData JSON embedded in *webpage*."""
        # Try the boundary-anchored pattern first, then the bare pattern.
        data = self._search_regex(
            (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
             self._YT_INITIAL_DATA_RE), webpage, 'yt initial data', fatal=fatal)
        if data:
            return self._parse_json(data, item_id, fatal=fatal)
    @staticmethod
    def _extract_session_index(*data):
        """
        Index of current account in account list.
        See: https://github.com/yt-dlp/yt-dlp/pull/519
        """
        # Return the first SESSION_INDEX found in any of the given ytcfg dicts.
        for ytcfg in data:
            session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
            if session_index is not None:
                return session_index

    # Deprecated?
    def _extract_identity_token(self, ytcfg=None, webpage=None):
        """Pull the ID_TOKEN from a parsed ytcfg, falling back to the raw page HTML."""
        if ytcfg:
            token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
            if token:
                return token
        if webpage:
            return self._search_regex(
                r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
                'identity token', default=None, fatal=False)
    @staticmethod
    def _extract_account_syncid(*args):
        """
        Extract syncId required to download private playlists of secondary channels
        @params response and/or ytcfg
        """
        for data in args:
            # ytcfg includes channel_syncid if on secondary channel
            delegated_sid = try_get(data, lambda x: x['DELEGATED_SESSION_ID'], compat_str)
            if delegated_sid:
                return delegated_sid
            sync_ids = (try_get(
                data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
                       lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
            if len(sync_ids) >= 2 and sync_ids[1]:
                # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
                # and just "user_syncid||" for primary channel. We only want the channel_syncid
                return sync_ids[0]

    @staticmethod
    def _extract_visitor_data(*args):
        """
        Extracts visitorData from an API response or ytcfg
        Appears to be used to track session state
        """
        return get_first(
            args, [('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))],
            expected_type=str)

    @property
    def is_authenticated(self):
        # Authenticated iff a SAPISIDHASH header can be built from cookies.
        return bool(self._generate_sapisidhash_header())

    def extract_ytcfg(self, video_id, webpage):
        """Parse the ytcfg.set({...}) config object out of *webpage*; {} on failure."""
        if not webpage:
            return {}
        return self._parse_json(
            self._search_regex(
                r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
                default='{}'), video_id, fatal=False) or {}
    def generate_api_headers(
            self, *, ytcfg=None, account_syncid=None, session_index=None,
            visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
        """Assemble the X-YouTube-*/auth request headers for an innertube API call.

        Headers whose value cannot be determined are omitted from the result.
        """
        origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
        headers = {
            'X-YouTube-Client-Name': compat_str(
                self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
            'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
            'Origin': origin,
            'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
            'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
            'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg)
        }
        if session_index is None:
            session_index = self._extract_session_index(ytcfg)
        if account_syncid or session_index is not None:
            headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0
        auth = self._generate_sapisidhash_header(origin)
        if auth is not None:
            headers['Authorization'] = auth
            headers['X-Origin'] = origin
        # Drop headers whose value could not be determined.
        return {h: v for h, v in headers.items() if v is not None}

    @staticmethod
    def _build_api_continuation_query(continuation, ctp=None):
        """Build the query payload used to fetch a continuation (next page)."""
        query = {
            'continuation': continuation
        }
        # TODO: Inconsistency with clickTrackingParams.
        # Currently we have a fixed ctp contained within context (from ytcfg)
        # and a ctp in root query for continuation.
        if ctp:
            query['clickTracking'] = {'clickTrackingParams': ctp}
        return query
    @classmethod
    def _extract_next_continuation_data(cls, renderer):
        # Legacy continuation format (nextContinuationData / reloadContinuationData).
        next_continuation = try_get(
            renderer, (lambda x: x['continuations'][0]['nextContinuationData'],
                       lambda x: x['continuation']['reloadContinuationData']), dict)
        if not next_continuation:
            return
        continuation = next_continuation.get('continuation')
        if not continuation:
            return
        ctp = next_continuation.get('clickTrackingParams')
        return cls._build_api_continuation_query(continuation, ctp)

    @classmethod
    def _extract_continuation_ep_data(cls, continuation_ep: dict):
        # Modern continuation format: continuationEndpoint -> continuationCommand token.
        if isinstance(continuation_ep, dict):
            continuation = try_get(
                continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
            if not continuation:
                return
            ctp = continuation_ep.get('clickTrackingParams')
            return cls._build_api_continuation_query(continuation, ctp)

    @classmethod
    def _extract_continuation(cls, renderer):
        """Find a continuation query in *renderer*, trying the legacy format first."""
        next_continuation = cls._extract_next_continuation_data(renderer)
        if next_continuation:
            return next_continuation
        contents = []
        for key in ('contents', 'items'):
            contents.extend(try_get(renderer, lambda x: x[key], list) or [])
        for content in contents:
            if not isinstance(content, dict):
                continue
            continuation_ep = try_get(
                content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
                          lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
                dict)
            continuation = cls._extract_continuation_ep_data(continuation_ep)
            if continuation:
                return continuation
    @classmethod
    def _extract_alerts(cls, data):
        """Yield (alert_type, message) pairs from a response's 'alerts' list."""
        for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
            if not isinstance(alert_dict, dict):
                continue
            for alert in alert_dict.values():
                alert_type = alert.get('type')
                if not alert_type:
                    continue
                message = cls._get_text(alert, 'text')
                if message:
                    yield alert_type, message

    def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
        """Report YouTube alerts: warnings (and all but the last error) warn;
        the last error raises ExtractorError when *fatal*."""
        errors = []
        warnings = []
        for alert_type, alert_message in alerts:
            if alert_type.lower() == 'error' and fatal:
                errors.append([alert_type, alert_message])
            else:
                warnings.append([alert_type, alert_message])
        for alert_type, alert_message in (warnings + errors[:-1]):
            self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message), only_once=only_once)
        if errors:
            raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)

    def _extract_and_report_alerts(self, data, *args, **kwargs):
        # Convenience wrapper: extract alerts from *data* then report them.
        return self._report_alerts(self._extract_alerts(data), *args, **kwargs)

    def _extract_badges(self, renderer: dict):
        """Return the set of lower-cased badge labels attached to *renderer*."""
        badges = set()
        for badge in try_get(renderer, lambda x: x['badges'], list) or []:
            label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str)
            if label:
                badges.add(label.lower())
        return badges
    @staticmethod
    def _get_text(data, *path_list, max_runs=None):
        """Extract display text ('simpleText' or joined 'runs') from *data* at any
        of the given traverse_obj paths; with no paths, *data* itself is searched."""
        for path in path_list or [None]:
            if path is None:
                obj = [data]
            else:
                obj = traverse_obj(data, path, default=[])
                # Wrap single results in a list so the loop below is uniform.
                if not any(key is ... or isinstance(key, (list, tuple)) for key in variadic(path)):
                    obj = [obj]
            for item in obj:
                text = try_get(item, lambda x: x['simpleText'], compat_str)
                if text:
                    return text
                runs = try_get(item, lambda x: x['runs'], list) or []
                if not runs and isinstance(item, list):
                    runs = item
                # Optionally limit the number of runs joined together.
                runs = runs[:min(len(runs), max_runs or len(runs))]
                text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
                if text:
                    return text

    def _get_count(self, data, *path_list):
        """Parse a human-formatted count (e.g. '1.2M views') found at *path_list*."""
        count_text = self._get_text(data, *path_list) or ''
        count = parse_count(count_text)
        if count is None:
            # Fall back to the leading digit group with separators stripped.
            count = str_to_int(
                self._search_regex(r'^([\d,]+)', re.sub(r'\s', '', count_text), 'count', default=None))
        return count
    @staticmethod
    def _extract_thumbnails(data, *path_list):
        """
        Extract thumbnails from thumbnails dict
        @param path_list: path list to level that contains 'thumbnails' key
        """
        thumbnails = []
        for path in path_list or [()]:
            for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]):
                thumbnail_url = url_or_none(thumbnail.get('url'))
                if not thumbnail_url:
                    continue
                # Sometimes youtube gives a wrong thumbnail URL. See:
                # https://github.com/yt-dlp/yt-dlp/issues/233
                # https://github.com/ytdl-org/youtube-dl/issues/28023
                if 'maxresdefault' in thumbnail_url:
                    thumbnail_url = thumbnail_url.split('?')[0]
                thumbnails.append({
                    'url': thumbnail_url,
                    'height': int_or_none(thumbnail.get('height')),
                    'width': int_or_none(thumbnail.get('width')),
                })
        return thumbnails
    @staticmethod
    def extract_relative_time(relative_time_text):
        """
        Extracts a relative time from string and converts to dt object
        e.g. 'streamed 6 days ago', '5 seconds ago (edited)', 'updated today'
        """
        mobj = re.search(r'(?P<start>today|yesterday|now)|(?P<time>\d+)\s*(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?\s*ago', relative_time_text)
        if mobj:
            start = mobj.group('start')
            if start:
                return datetime_from_str(start)
            try:
                return datetime_from_str('now-%s%s' % (mobj.group('time'), mobj.group('unit')))
            except ValueError:
                return None

    def _extract_time_text(self, renderer, *path_list):
        """Return (timestamp, raw_text) parsed from a localized time string in *renderer*;
        timestamp is None when the text cannot be parsed."""
        text = self._get_text(renderer, *path_list) or ''
        dt = self.extract_relative_time(text)
        timestamp = None
        if isinstance(dt, datetime.datetime):
            timestamp = calendar.timegm(dt.timetuple())
        if timestamp is None:
            # Fall back to absolute-date parsing of the (possibly prefixed) text.
            timestamp = (
                unified_timestamp(text) or unified_timestamp(
                    self._search_regex(
                        (r'([a-z]+\s*\d{1,2},?\s*20\d{2})', r'(?:.+|^)(?:live|premieres|ed|ing)(?:\s*(?:on|for))?\s*(.+\d)'),
                        text.lower(), 'time text', default=None)))
        if text and timestamp is None:
            self.report_warning(f"Cannot parse localized time text '{text}'" + bug_reports_message(), only_once=True)
        return timestamp, text
    def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
                          ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None,
                          default_client='web'):
        """Call the innertube API with retries.

        Retries on network errors (except HTTP 403/429), on 'unknown error'
        alerts in 200 responses, and on responses missing *check_get_keys*.
        Returns the parsed response, or None when non-fatal and all retries fail.
        """
        response = None
        last_error = None
        count = -1
        retries = self.get_param('extractor_retries', 3)
        if check_get_keys is None:
            check_get_keys = []
        while count < retries:
            count += 1
            if last_error:
                self.report_warning('%s. Retrying ...' % remove_end(last_error, '.'))
            try:
                response = self._call_api(
                    ep=ep, fatal=True, headers=headers,
                    video_id=item_id, query=query,
                    context=self._extract_context(ytcfg, default_client),
                    api_key=self._extract_api_key(ytcfg, default_client),
                    api_hostname=api_hostname, default_client=default_client,
                    note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
            except ExtractorError as e:
                if isinstance(e.cause, network_exceptions):
                    if isinstance(e.cause, compat_HTTPError):
                        # Try to surface the API's own error message from the body.
                        first_bytes = e.cause.read(512)
                        if not is_html(first_bytes):
                            yt_error = try_get(
                                self._parse_json(
                                    self._webpage_read_content(e.cause, None, item_id, prefix=first_bytes) or '{}', item_id, fatal=False),
                                lambda x: x['error']['message'], compat_str)
                            if yt_error:
                                self._report_alerts([('ERROR', yt_error)], fatal=False)
                    # Downloading page may result in intermittent 5xx HTTP error
                    # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
                    # We also want to catch all other network exceptions since errors in later pages can be troublesome
                    # See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
                    if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
                        last_error = error_to_compat_str(e.cause or e.msg)
                        if count < retries:
                            continue
                if fatal:
                    raise
                else:
                    self.report_warning(error_to_compat_str(e))
                    return
            else:
                try:
                    self._extract_and_report_alerts(response, only_once=True)
                except ExtractorError as e:
                    # YouTube servers may return errors we want to retry on in a 200 OK response
                    # See: https://github.com/yt-dlp/yt-dlp/issues/839
                    if 'unknown error' in e.msg.lower():
                        last_error = e.msg
                        continue
                    if fatal:
                        raise
                    self.report_warning(error_to_compat_str(e))
                    return
                if not check_get_keys or dict_get(response, check_get_keys):
                    break
                # Youtube sometimes sends incomplete data
                # See: https://github.com/ytdl-org/youtube-dl/issues/28194
                last_error = 'Incomplete data received'
                if count >= retries:
                    if fatal:
                        raise ExtractorError(last_error)
                    else:
                        self.report_warning(last_error)
                        return
        return response
@staticmethod
def is_music_url(url):
return re.match(r'https?://music\.youtube\.com/', url) is not None
    def _extract_video(self, renderer):
        """Convert a videoRenderer dict into a url-type info dict pointing at YoutubeIE."""
        video_id = renderer.get('videoId')
        title = self._get_text(renderer, 'title')
        description = self._get_text(renderer, 'descriptionSnippet')
        duration = parse_duration(self._get_text(
            renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
        if duration is None:
            # Fall back to the accessibility label, e.g. "... 1 minute, 2 seconds - play Short".
            duration = parse_duration(self._search_regex(
                r'(?i)(ago)(?!.*\1)\s+(?P<duration>[a-z0-9 ,]+?)(?:\s+[\d,]+\s+views)?(?:\s+-\s+play\s+short)?$',
                traverse_obj(renderer, ('title', 'accessibility', 'accessibilityData', 'label'), default='', expected_type=str),
                video_id, default=None, group='duration'))
        view_count = self._get_count(renderer, 'viewCountText')
        uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
        channel_id = traverse_obj(
            renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'),
            expected_type=str, get_all=False)
        timestamp, time_text = self._extract_time_text(renderer, 'publishedTimeText')
        scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
        overlay_style = traverse_obj(
            renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'),
            get_all=False, expected_type=str)
        badges = self._extract_badges(renderer)
        thumbnails = self._extract_thumbnails(renderer, 'thumbnail')
        navigation_url = urljoin('https://www.youtube.com/', traverse_obj(
            renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
            expected_type=str)) or ''
        url = f'https://www.youtube.com/watch?v={video_id}'
        # Shorts are linked via their dedicated URL form.
        if overlay_style == 'SHORTS' or '/shorts/' in navigation_url:
            url = f'https://www.youtube.com/shorts/{video_id}'
        return {
            '_type': 'url',
            'ie_key': YoutubeIE.ie_key(),
            'id': video_id,
            'url': url,
            'title': title,
            'description': description,
            'duration': duration,
            'view_count': view_count,
            'uploader': uploader,
            'channel_id': channel_id,
            'thumbnails': thumbnails,
            # Approximate upload date only when the user opted in (it is derived
            # from a localized relative-time string).
            'upload_date': (strftime_or_none(timestamp, '%Y%m%d')
                            if self._configuration_arg('approximate_date', ie_key='youtubetab')
                            else None),
            'live_status': ('is_upcoming' if scheduled_timestamp is not None
                            else 'was_live' if 'streamed' in time_text.lower()
                            else 'is_live' if overlay_style is not None and overlay_style == 'LIVE' or 'live now' in badges
                            else None),
            'release_timestamp': scheduled_timestamp,
            'availability': self._availability(needs_premium='premium' in badges, needs_subscription='members only' in badges)
        }
class YoutubeIE(YoutubeBaseInfoExtractor):
    """Information extractor for individual YouTube videos.

    Handles watch/embed/shorts URLs on youtube.com, youtu.be short links,
    various mirror/proxy hosts and invidious instances, as well as bare
    11-character video IDs (see ``_VALID_URL``).
    """
    IE_DESC = 'YouTube'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
(?:www\.)?deturl\.com/www\.youtube\.com|
(?:www\.)?pwnyoutube\.com|
(?:www\.)?hooktube\.com|
(?:www\.)?yourepeat\.com|
tube\.majestyc\.net|
%(invidious)s|
youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e|shorts)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
%(invidious)s
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
(?:\#|$)""" % {
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
'397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
'398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
'399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
'400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
'401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
}
_SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'start_time': 1,
'end_time': 9,
'channel_follower_count': int
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
},
'skip': 'Private video',
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
'abr': 129.495,
'like_count': int,
'channel_id': 'UChuZAo1RKL85gev3Eal9_zg',
'playable_in_embed': True,
'channel_url': 'https://www.youtube.com/channel/UChuZAo1RKL85gev3Eal9_zg',
'view_count': int,
'track': 'The Spark',
'live_status': 'not_live',
'thumbnail': 'https://i.ytimg.com/vi_webp/IB3lcPjvWLA/maxresdefault.webp',
'channel': 'Afrojack',
'uploader_url': 'http://www.youtube.com/user/AfrojackVEVO',
'tags': 'count:19',
'availability': 'public',
'categories': ['Music'],
'age_limit': 0,
'alt_title': 'The Spark',
'channel_follower_count': int
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Age-gate videos. See https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-888837000
{
'note': 'Embed allowed age-gate video',
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
'categories': ['Gaming'],
'thumbnail': 'https://i.ytimg.com/vi_webp/HtVdAasjOgU/maxresdefault.webp',
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UCzybXLxv08IApdjdN0mJhEg',
'like_count': int,
'channel': 'The Witcher',
'live_status': 'not_live',
'tags': 'count:17',
'channel_id': 'UCzybXLxv08IApdjdN0mJhEg',
'playable_in_embed': True,
'view_count': int,
'channel_follower_count': int
},
},
{
'note': 'Age-gate video with embed allowed in public site',
'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
'info_dict': {
'id': 'HsUATh_Nc2U',
'ext': 'mp4',
'title': 'Godzilla 2 (Official Video)',
'description': 'md5:bf77e03fcae5529475e500129b05668a',
'upload_date': '20200408',
'uploader_id': 'FlyingKitty900',
'uploader': 'FlyingKitty',
'age_limit': 18,
'availability': 'needs_auth',
'channel_id': 'UCYQT13AtrJC0gsM1far_zJg',
'uploader_url': 'http://www.youtube.com/user/FlyingKitty900',
'channel': 'FlyingKitty',
'channel_url': 'https://www.youtube.com/channel/UCYQT13AtrJC0gsM1far_zJg',
'view_count': int,
'categories': ['Entertainment'],
'live_status': 'not_live',
'tags': ['Flyingkitty', 'godzilla 2'],
'thumbnail': 'https://i.ytimg.com/vi/HsUATh_Nc2U/maxresdefault.jpg',
'like_count': int,
'duration': 177,
'playable_in_embed': True,
'channel_follower_count': int
},
},
{
'note': 'Age-gate video embedable only with clientScreen=EMBED',
'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
'info_dict': {
'id': 'Tq92D6wQ1mg',
'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
'ext': 'mp4',
'upload_date': '20191228',
'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'uploader': 'Projekt Melody',
'description': 'md5:17eccca93a786d51bc67646756894066',
'age_limit': 18,
'like_count': int,
'availability': 'needs_auth',
'uploader_url': 'http://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/Tq92D6wQ1mg/sddefault.webp',
'channel': 'Projekt Melody',
'live_status': 'not_live',
'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'],
'playable_in_embed': True,
'categories': ['Entertainment'],
'duration': 106,
'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_follower_count': int
},
},
{
'note': 'Non-Agegated non-embeddable video',
'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
'info_dict': {
'id': 'MeJVWBSsPAY',
'ext': 'mp4',
'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
'uploader': 'Herr Lurik',
'uploader_id': 'st3in234',
'description': 'Fan Video. Music & Lyrics by OOMPH!.',
'upload_date': '20130730',
'track': 'Such mich find mich',
'age_limit': 0,
'tags': ['oomph', 'such mich find mich', 'lyrics', 'german industrial', 'musica industrial'],
'like_count': int,
'playable_in_embed': False,
'creator': 'OOMPH!',
'thumbnail': 'https://i.ytimg.com/vi/MeJVWBSsPAY/sddefault.jpg',
'view_count': int,
'alt_title': 'Such mich find mich',
'duration': 210,
'channel': 'Herr Lurik',
'channel_id': 'UCdR3RSDPqub28LjZx0v9-aA',
'categories': ['Music'],
'availability': 'public',
'uploader_url': 'http://www.youtube.com/user/st3in234',
'channel_url': 'https://www.youtube.com/channel/UCdR3RSDPqub28LjZx0v9-aA',
'live_status': 'not_live',
'artist': 'OOMPH!',
'channel_follower_count': int
},
},
{
'note': 'Non-bypassable age-gated video',
'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
'only_matching': True,
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
'availability': 'public',
'tags': 'count:14',
'channel_id': 'UCYEK6xds6eo-3tr4xRdflmQ',
'view_count': int,
'live_status': 'not_live',
'channel': 'deadmau5',
'thumbnail': 'https://i.ytimg.com/vi_webp/__2ABJjxzNo/maxresdefault.webp',
'like_count': int,
'track': 'Some Chords',
'artist': 'deadmau5',
'playable_in_embed': True,
'age_limit': 0,
'channel_url': 'https://www.youtube.com/channel/UCYEK6xds6eo-3tr4xRdflmQ',
'categories': ['Music'],
'album': 'Some Chords',
'channel_follower_count': int
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
'like_count': int,
'release_timestamp': 1343767800,
'playable_in_embed': True,
'categories': ['Sports'],
'release_date': '20120731',
'channel': 'Olympics',
'tags': ['Hockey', '2012-07-31', '31 July 2012', 'Riverbank Arena', 'Session', 'Olympics', 'Olympic Games', 'London 2012', '2012 Summer Olympics', 'Summer Games'],
'channel_id': 'UCTl3QQTvqHFjurroKxexy2Q',
'thumbnail': 'https://i.ytimg.com/vi/lqQg6PlCWgI/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'live_status': 'was_live',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCTl3QQTvqHFjurroKxexy2Q',
'channel_follower_count': int
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
'playable_in_embed': True,
'channel': '孫ᄋᄅ',
'age_limit': 0,
'tags': 'count:11',
'channel_url': 'https://www.youtube.com/channel/UCS-xxCmRaA6BFdmgDPA_BIw',
'channel_id': 'UCS-xxCmRaA6BFdmgDPA_BIw',
'thumbnail': 'https://i.ytimg.com/vi/_b-2C3KPAM0/maxresdefault.jpg',
'view_count': int,
'categories': ['People & Blogs'],
'like_count': int,
'live_status': 'not_live',
'availability': 'unlisted',
'channel_follower_count': int
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
'info_dict': {
'id': 'jvGDaLqkpTg',
'title': 'Tom Clancy Free Weekend Rainbow Whatever',
'description': 'md5:e03b909557865076822aa169218d6a5d',
},
'playlist': [{
'info_dict': {
'id': 'jvGDaLqkpTg',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10643,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '3AKt1R1aDnw',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10991,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': 'RtAMM00gpVc',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10995,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '6N2fdlP3C5U',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10990,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}],
'params': {
'skip_download': True,
},
'skip': 'Not multifeed anymore',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk',
'artist': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
'thumbnail': 'https://i.ytimg.com/vi_webp/lsguqyKfVQg/maxresdefault.webp',
'categories': ['Film & Animation'],
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCTSRgz5jylBvFt_S7wnsqLQ',
'channel_id': 'UCTSRgz5jylBvFt_S7wnsqLQ',
'tags': 'count:13',
'availability': 'public',
'channel': 'IronSoulElf',
'playable_in_embed': True,
'like_count': int,
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video with incomplete 'yt:stretch=16:'
'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
'only_matching': True,
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150128',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
'channel_id': 'UCuLGmD72gJDBwmLw06X58SA',
'channel_url': 'https://www.youtube.com/channel/UCuLGmD72gJDBwmLw06X58SA',
'like_count': int,
'age_limit': 0,
'tags': ['Copyright (Legal Subject)', 'Law (Industry)', 'William W. Fisher (Author)'],
'channel': 'The Berkman Klein Center for Internet & Society',
'availability': 'public',
'view_count': int,
'categories': ['Education'],
'thumbnail': 'https://i.ytimg.com/vi_webp/M4gD1WSo5mA/maxresdefault.webp',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
'duration': 4060,
'upload_date': '20151120',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
'playable_in_embed': True,
'tags': 'count:12',
'like_count': int,
'channel_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'age_limit': 0,
'availability': 'public',
'categories': ['News & Politics'],
'channel': 'Bernie Sanders',
'thumbnail': 'https://i.ytimg.com/vi_webp/eQcmzGIKrzg/maxresdefault.webp',
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
'thumbnail': 'https://i.ytimg.com/vi_webp/iqKdEhx-dD4/maxresdefault.webp',
'tags': 'count:12',
'view_count': int,
'availability': 'public',
'age_limit': 0,
'channel': 'Vsauce',
'episode': 'Episode 1',
'categories': ['Entertainment'],
'season': 'Season 1',
'channel_id': 'UC6nSFpj9HTCZ5t-N3Rm3-HA',
'channel_url': 'https://www.youtube.com/channel/UC6nSFpj9HTCZ5t-N3Rm3-HA',
'like_count': int,
'playable_in_embed': True,
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
'alt_title': 'Voyeur Girl',
'view_count': int,
'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'playable_in_embed': True,
'like_count': int,
'categories': ['Music'],
'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'channel': 'Stephen',
'availability': 'public',
'creator': 'Stephen',
'duration': 169,
'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
'age_limit': 0,
'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'tags': 'count:11',
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
'skip': 'Video unavailable',
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/x41yOUIvK2k/maxresdefault.webp',
'uploader_url': 'http://www.youtube.com/user/ElevageOrVert',
'like_count': int,
'channel_id': 'UCo03ZQPBW5U4UC3regpt1nw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCo03ZQPBW5U4UC3regpt1nw',
'availability': 'public',
'age_limit': 0,
'categories': ['Pets & Animals'],
'duration': 7,
'playable_in_embed': True,
'live_status': 'not_live',
'channel': 'ElevageOrVert',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# with '};' inside yt initial data (see [1])
# see [2] for an example with '};' inside ytInitialPlayerResponse
# 1. https://github.com/ytdl-org/youtube-dl/issues/27093
# 2. https://github.com/ytdl-org/youtube-dl/issues/27216
'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
'info_dict': {
'id': 'CHqg6qOn4no',
'ext': 'mp4',
'title': 'Part 77 Sort a list of simple types in c#',
'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
'upload_date': '20130831',
'uploader_id': 'kudvenkat',
'uploader': 'kudvenkat',
'channel_id': 'UCCTVrRB5KpIiK6V2GGVsR1Q',
'like_count': int,
'uploader_url': 'http://www.youtube.com/user/kudvenkat',
'channel_url': 'https://www.youtube.com/channel/UCCTVrRB5KpIiK6V2GGVsR1Q',
'live_status': 'not_live',
'categories': ['Education'],
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/CHqg6qOn4no/sddefault.jpg',
'tags': 'count:12',
'playable_in_embed': True,
'age_limit': 0,
'view_count': int,
'duration': 522,
'channel': 'kudvenkat',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# another example of '};' in ytInitialData
'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
'only_matching': True,
},
{
# https://github.com/ytdl-org/youtube-dl/pull/28094
'url': 'OtqTfy26tG0',
'info_dict': {
'id': 'OtqTfy26tG0',
'ext': 'mp4',
'title': 'Burn Out',
'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
'upload_date': '20141120',
'uploader': 'The Cinematic Orchestra - Topic',
'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'artist': 'The Cinematic Orchestra',
'track': 'Burn Out',
'album': 'Every Day',
'like_count': int,
'live_status': 'not_live',
'alt_title': 'Burn Out',
'duration': 614,
'age_limit': 0,
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'creator': 'The Cinematic Orchestra',
'channel': 'The Cinematic Orchestra',
'tags': ['The Cinematic Orchestra', 'Every Day', 'Burn Out'],
'channel_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/OtqTfy26tG0/maxresdefault.jpg',
'categories': ['Music'],
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# controversial video, only works with bpctr when authenticated with cookies
'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
'only_matching': True,
},
{
# controversial video, requires bpctr/contentCheckOk
'url': 'https://www.youtube.com/watch?v=SZJvDhaSDnc',
'info_dict': {
'id': 'SZJvDhaSDnc',
'ext': 'mp4',
'title': 'San Diego teen commits suicide after bullying over embarrassing video',
'channel_id': 'UC-SJ6nODDmufqBzPBwCvYvQ',
'uploader': 'CBS Mornings',
'uploader_id': 'CBSThisMorning',
'upload_date': '20140716',
'description': 'md5:acde3a73d3f133fc97e837a9f76b53b7',
'duration': 170,
'categories': ['News & Politics'],
'uploader_url': 'http://www.youtube.com/user/CBSThisMorning',
'view_count': int,
'channel': 'CBS Mornings',
'tags': ['suicide', 'bullying', 'video', 'cbs', 'news'],
'thumbnail': 'https://i.ytimg.com/vi/SZJvDhaSDnc/hqdefault.jpg',
'age_limit': 18,
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UC-SJ6nODDmufqBzPBwCvYvQ',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
}
},
{
# restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
'url': 'cBvYw8_A0vQ',
'info_dict': {
'id': 'cBvYw8_A0vQ',
'ext': 'mp4',
'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
'upload_date': '20201120',
'uploader': 'Walk around Japan',
'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'duration': 1456,
'categories': ['Travel & Events'],
'channel_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'view_count': int,
'channel': 'Walk around Japan',
'tags': ['Ueno Tokyo', 'Okachimachi Tokyo', 'Ameyoko Street', 'Tokyo attraction', 'Travel in Tokyo'],
'thumbnail': 'https://i.ytimg.com/vi_webp/cBvYw8_A0vQ/hqdefault.webp',
'age_limit': 0,
'availability': 'public',
'channel_url': 'https://www.youtube.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
}, {
# Has multiple audio streams
'url': 'WaOKSUlf4TM',
'only_matching': True
}, {
# Requires Premium: has format 141 when requested using YTM url
'url': 'https://music.youtube.com/watch?v=XclachpHxis',
'only_matching': True
}, {
# multiple subtitles with same lang_code
'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
'only_matching': True,
}, {
# Force use android client fallback
'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY',
'info_dict': {
'id': 'YOelRv7fMxY',
'title': 'DIGGING A SECRET TUNNEL Part 1',
'ext': '3gp',
'upload_date': '20210624',
'channel_id': 'UCp68_FLety0O-n9QU6phsgw',
'uploader': 'colinfurze',
'uploader_id': 'colinfurze',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw',
'description': 'md5:5d5991195d599b56cd0c4148907eec50',
'duration': 596,
'categories': ['Entertainment'],
'uploader_url': 'http://www.youtube.com/user/colinfurze',
'view_count': int,
'channel': 'colinfurze',
'tags': ['Colin', 'furze', 'Terry', 'tunnel', 'underground', 'bunker'],
'thumbnail': 'https://i.ytimg.com/vi/YOelRv7fMxY/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'format': '17', # 3gp format available on android
'extractor_args': {'youtube': {'player_client': ['android']}},
},
},
{
# Skip download of additional client configs (remix client config in this case)
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'only_matching': True,
'params': {
'extractor_args': {'youtube': {'player_skip': ['configs']}},
},
}, {
# shorts
'url': 'https://www.youtube.com/shorts/BGQWPY4IigY',
'only_matching': True,
}, {
'note': 'Storyboards',
'url': 'https://www.youtube.com/watch?v=5KLPxDtMqe8',
'info_dict': {
'id': '5KLPxDtMqe8',
'ext': 'mhtml',
'format_id': 'sb0',
'title': 'Your Brain is Plastic',
'uploader_id': 'scishow',
'description': 'md5:89cd86034bdb5466cd87c6ba206cd2bc',
'upload_date': '20140324',
'uploader': 'SciShow',
'like_count': int,
'channel_id': 'UCZYTClx2T1of7BRZ86-8fow',
'channel_url': 'https://www.youtube.com/channel/UCZYTClx2T1of7BRZ86-8fow',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/5KLPxDtMqe8/maxresdefault.jpg',
'playable_in_embed': True,
'tags': 'count:12',
'uploader_url': 'http://www.youtube.com/user/scishow',
'availability': 'public',
'channel': 'SciShow',
'live_status': 'not_live',
'duration': 248,
'categories': ['Education'],
'age_limit': 0,
'channel_follower_count': int
}, 'params': {'format': 'mhtml', 'skip_download': True}
}, {
# Ensure video upload_date is in UTC timezone (video was uploaded 1641170939)
'url': 'https://www.youtube.com/watch?v=2NUZ8W2llS4',
'info_dict': {
'id': '2NUZ8W2llS4',
'ext': 'mp4',
'title': 'The NP that test your phone performance 🙂',
'description': 'md5:144494b24d4f9dfacb97c1bbef5de84d',
'uploader': 'Leon Nguyen',
'uploader_id': 'VNSXIII',
'uploader_url': 'http://www.youtube.com/user/VNSXIII',
'channel_id': 'UCRqNBSOHgilHfAczlUmlWHA',
'channel_url': 'https://www.youtube.com/channel/UCRqNBSOHgilHfAczlUmlWHA',
'duration': 21,
'view_count': int,
'age_limit': 0,
'categories': ['Gaming'],
'tags': 'count:23',
'playable_in_embed': True,
'live_status': 'not_live',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Leon Nguyen',
'thumbnail': 'https://i.ytimg.com/vi_webp/2NUZ8W2llS4/maxresdefault.webp',
'channel_follower_count': int
}
}, {
# date text is premiered video, ensure upload date in UTC (published 1641172509)
'url': 'https://www.youtube.com/watch?v=mzZzzBU6lrM',
'info_dict': {
'id': 'mzZzzBU6lrM',
'ext': 'mp4',
'title': 'I Met GeorgeNotFound In Real Life...',
'description': 'md5:cca98a355c7184e750f711f3a1b22c84',
'uploader': 'Quackity',
'uploader_id': 'QuackityHQ',
'uploader_url': 'http://www.youtube.com/user/QuackityHQ',
'channel_id': 'UC_8NknAFiyhOUaZqHR3lq3Q',
'channel_url': 'https://www.youtube.com/channel/UC_8NknAFiyhOUaZqHR3lq3Q',
'duration': 955,
'view_count': int,
'age_limit': 0,
'categories': ['Entertainment'],
'tags': 'count:26',
'playable_in_embed': True,
'live_status': 'not_live',
'release_timestamp': 1641172509,
'release_date': '20220103',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Quackity',
'thumbnail': 'https://i.ytimg.com/vi/mzZzzBU6lrM/maxresdefault.jpg',
'channel_follower_count': int
}
},
{ # continuous livestream. Microformat upload date should be preferred.
# Upload date was 2021-06-19 (not UTC), while stream start is 2021-11-27
'url': 'https://www.youtube.com/watch?v=kgx4WGK0oNU',
'info_dict': {
'id': 'kgx4WGK0oNU',
'title': r're:jazz\/lofi hip hop radio🌱chill beats to relax\/study to \[LIVE 24\/7\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
'ext': 'mp4',
'channel_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'availability': 'public',
'age_limit': 0,
'release_timestamp': 1637975704,
'upload_date': '20210619',
'channel_url': 'https://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'live_status': 'is_live',
'thumbnail': 'https://i.ytimg.com/vi/kgx4WGK0oNU/maxresdefault.jpg',
'uploader': '阿鲍Abao',
'uploader_url': 'http://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'channel': 'Abao in Tokyo',
'channel_follower_count': int,
'release_date': '20211127',
'tags': 'count:39',
'categories': ['People & Blogs'],
'like_count': int,
'uploader_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'view_count': int,
'playable_in_embed': True,
'description': 'md5:2ef1d002cad520f65825346e2084e49d',
},
'params': {'skip_download': True}
},
]
@classmethod
def suitable(cls, url):
from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('list', [None])[0]:
return False
return super(YoutubeIE, cls).suitable(url)
    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        # Per-instance caches: downloaded player JS keyed by player id, and
        # extracted signature/nsig helpers keyed by (player_url, sig shape).
        self._code_cache = {}
        self._player_cache = {}
    def _prepare_live_from_start_formats(self, formats, video_id, live_start_time, url, webpage_url, smuggled_data):
        """Wire up live-from-start formats so their fragments are generated lazily.

        Keeps only formats flagged ``is_from_start`` and attaches a fragment
        generator (_live_dash_fragments) plus a manifest-refresh callback to each.
        """
        lock = threading.Lock()
        is_live = True
        start_time = time.time()
        formats = [f for f in formats if f.get('is_from_start')]
        def refetch_manifest(format_id, delay):
            # Re-download the player responses and refresh `formats`, but only
            # if at least `delay` seconds have elapsed since the last fetch.
            nonlocal formats, start_time, is_live
            if time.time() <= start_time + delay:
                return
            _, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
            video_details = traverse_obj(
                prs, (..., 'videoDetails'), expected_type=dict, default=[])
            microformats = traverse_obj(
                prs, (..., 'microformat', 'playerMicroformatRenderer'),
                expected_type=dict, default=[])
            _, is_live, _, formats = self._list_formats(video_id, microformats, video_details, prs, player_url)
            start_time = time.time()
        def mpd_feed(format_id, delay):
            """
            @returns (manifest_url, manifest_stream_number, is_live) or None
            """
            with lock:
                refetch_manifest(format_id, delay)
            f = next((f for f in formats if f['format_id'] == format_id), None)
            if not f:
                if not is_live:
                    self.to_screen(f'{video_id}: Video is no longer live')
                else:
                    self.report_warning(
                        f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}')
                return None
            return f['manifest_url'], f['manifest_stream_number'], is_live
        for f in formats:
            # Hand fragment generation to _live_dash_fragments; the downloader
            # invokes the partial lazily when it starts this format.
            f['is_live'] = True
            f['protocol'] = 'http_dash_segments_generator'
            f['fragments'] = functools.partial(
                self._live_dash_fragments, f['format_id'], live_start_time, mpd_feed)
    def _live_dash_fragments(self, format_id, live_start_time, mpd_feed, ctx):
        """Yield DASH fragment dicts for a live stream, starting from its beginning.

        Tracks the latest sequence number via the MPD manifest (through
        mpd_feed) and via the X-Head-Seqnum header of fetched segments,
        yielding one fragment URL per sequence index until the stream ends.
        """
        # MAX_DURATION = 432000 s = 120 h, the window YouTube keeps available.
        FETCH_SPAN, MAX_DURATION = 5, 432000
        mpd_url, stream_number, is_live = None, None, True
        begin_index = 0
        download_start_time = ctx.get('start') or time.time()
        lack_early_segments = download_start_time - (live_start_time or download_start_time) > MAX_DURATION
        if lack_early_segments:
            self.report_warning(bug_reports_message(
                'Starting download from the last 120 hours of the live stream since '
                'YouTube does not have data before that. If you think this is wrong,'), only_once=True)
            lack_early_segments = True
        # no_fragment_score accumulates failures; the loop gives up past 30.
        known_idx, no_fragment_score, last_segment_url = begin_index, 0, None
        fragments, fragment_base_url = None, None
        def _extract_sequence_from_mpd(refresh_sequence, immediate):
            # Returns (should_continue, last_seq), possibly refreshing the MPD.
            nonlocal mpd_url, stream_number, is_live, no_fragment_score, fragments, fragment_base_url
            # Obtain from MPD's maximum seq value
            old_mpd_url = mpd_url
            last_error = ctx.pop('last_error', None)
            # A 403 on the last request suggests an expired manifest URL, so
            # allow an almost-immediate refresh instead of the 5 h default.
            expire_fast = immediate or last_error and isinstance(last_error, compat_HTTPError) and last_error.code == 403
            mpd_url, stream_number, is_live = (mpd_feed(format_id, 5 if expire_fast else 18000)
                or (mpd_url, stream_number, False))
            if not refresh_sequence:
                if expire_fast and not is_live:
                    return False, last_seq
                elif old_mpd_url == mpd_url:
                    return True, last_seq
            try:
                fmts, _ = self._extract_mpd_formats_and_subtitles(
                    mpd_url, None, note=False, errnote=False, fatal=False)
            except ExtractorError:
                fmts = None
            if not fmts:
                no_fragment_score += 2
                return False, last_seq
            fmt_info = next(x for x in fmts if x['manifest_stream_number'] == stream_number)
            fragments = fmt_info['fragments']
            fragment_base_url = fmt_info['fragment_base_url']
            assert fragment_base_url
            # The newest sequence number is encoded in the last fragment path.
            _last_seq = int(re.search(r'(?:/|^)sq/(\d+)', fragments[-1]['path']).group(1))
            return True, _last_seq
        while is_live:
            fetch_time = time.time()
            if no_fragment_score > 30:
                return
            if last_segment_url:
                # Obtain from "X-Head-Seqnum" header value from each segment
                try:
                    urlh = self._request_webpage(
                        last_segment_url, None, note=False, errnote=False, fatal=False)
                except ExtractorError:
                    urlh = None
                last_seq = try_get(urlh, lambda x: int_or_none(x.headers['X-Head-Seqnum']))
                if last_seq is None:
                    no_fragment_score += 2
                    last_segment_url = None
                    continue
            else:
                should_continue, last_seq = _extract_sequence_from_mpd(True, no_fragment_score > 15)
                no_fragment_score += 2
                if not should_continue:
                    continue
            if known_idx > last_seq:
                # Manifest went backwards; drop the stale segment URL and retry.
                last_segment_url = None
                continue
            last_seq += 1
            if begin_index < 0 and known_idx < 0:
                # skip from the start when it's negative value
                known_idx = last_seq + begin_index
            if lack_early_segments:
                known_idx = max(known_idx, last_seq - int(MAX_DURATION // fragments[-1]['duration']))
            try:
                for idx in range(known_idx, last_seq):
                    # do not update sequence here or you'll get skipped some part of it
                    should_continue, _ = _extract_sequence_from_mpd(False, False)
                    if not should_continue:
                        known_idx = idx - 1
                        raise ExtractorError('breaking out of outer loop')
                    last_segment_url = urljoin(fragment_base_url, 'sq/%d' % idx)
                    yield {
                        'url': last_segment_url,
                    }
                if known_idx == last_seq:
                    no_fragment_score += 5
                else:
                    no_fragment_score = 0
                known_idx = last_seq
            except ExtractorError:
                continue
            # Pace the polling so consecutive fetches are FETCH_SPAN s apart.
            time.sleep(max(0, FETCH_SPAN + fetch_time - time.time()))
def _extract_player_url(self, *ytcfgs, webpage=None):
player_url = traverse_obj(
ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
get_all=False, expected_type=compat_str)
if not player_url:
return
return urljoin('https://www.youtube.com', player_url)
def _download_player_url(self, video_id, fatal=False):
res = self._download_webpage(
'https://www.youtube.com/iframe_api',
note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
if res:
player_version = self._search_regex(
r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal)
if player_version:
return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('id')
def _load_player(self, video_id, player_url, fatal=True):
player_id = self._extract_player_info(player_url)
if player_id not in self._code_cache:
code = self._download_webpage(
player_url, video_id, fatal=fatal,
note='Downloading player ' + player_id,
errnote='Download of %s failed' % player_url)
if code:
self._code_cache[player_id] = code
return self._code_cache.get(player_id)
    def _extract_signature_function(self, video_id, player_url, example_sig):
        """Return a callable that descrambles signatures shaped like *example_sig*.

        The recovered index permutation is cached on disk keyed by player id
        and signature shape, so the player JS is interpreted at most once.
        """
        player_id = self._extract_player_info(player_url)
        # Read from filesystem cache
        func_id = 'js_%s_%s' % (
            player_id, self._signature_cache_id(example_sig))
        assert os.path.basename(func_id) == func_id
        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            # Cached spec is a list of source indices: apply it directly.
            return lambda s: ''.join(s[i] for i in cache_spec)
        code = self._load_player(video_id, player_url)
        if code:
            res = self._parse_sig_js(code)
            # Run the JS function on a probe string of distinct characters to
            # recover the permutation it performs, then cache that spec.
            test_string = ''.join(map(compat_chr, range(len(example_sig))))
            cache_res = res(test_string)
            cache_spec = [ord(c) for c in cache_res]
            self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
            return res
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the extracted signature function.

        Debug aid, active only under the youtube_print_sig_code option: probes
        *func* with a known string and reconstructs the permutation as a mix of
        slice expressions and single-index lookups.
        """
        if not self.get_param('youtube_print_sig_code'):
            return
        def gen_sig_code(idxs):
            # Emit 's[...]' expressions covering idxs, coalescing arithmetic
            # runs with step +/-1 into slices.
            def _genslice(start, end, step):
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)
            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
    def _parse_sig_js(self, jscode):
        """Locate the signature-scrambling function in the player JS and wrap it.

        Tries a series of regexes (newest player layouts first) to find the
        function name, then interprets it with JSInterpreter.
        """
        funcname = self._search_regex(
            (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
             r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             # Obsolete patterns
             r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
             r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
            jscode, 'Initial JS player signature function name', group='sig')
        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        # The JS function takes the signature as its single argument.
        return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
self._print_sig_code(func, s)
return func(s)
except Exception as e:
raise ExtractorError('Signature extraction failed: ' + traceback.format_exc(), cause=e)
def _decrypt_nsig(self, s, video_id, player_url):
"""Turn the encrypted n field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt nsig without player_url')
player_url = urljoin('https://www.youtube.com', player_url)
sig_id = ('nsig_value', s)
if sig_id in self._player_cache:
return self._player_cache[sig_id]
try:
player_id = ('nsig', player_url)
if player_id not in self._player_cache:
self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
func = self._player_cache[player_id]
self._player_cache[sig_id] = func(s)
self.write_debug(f'Decrypted nsig {s} => {self._player_cache[sig_id]}')
return self._player_cache[sig_id]
except Exception as e:
raise ExtractorError(traceback.format_exc(), cause=e, video_id=video_id)
    def _extract_n_function_name(self, jscode):
        """Return the name of the function that transforms the `n` query parameter."""
        nfunc, idx = self._search_regex(
            r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z0-9$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z0-9]\)',
            jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
        if not idx:
            return nfunc
        # The call site used an indexed reference (e.g. abc[0]); resolve the
        # real name from the JS array literal it points into.
        return json.loads(js_to_json(self._search_regex(
            rf'var {re.escape(nfunc)}\s*=\s*(\[.+?\]);', jscode,
            f'Initial JS player n function list ({nfunc}.{idx})')))[int(idx)]
def _extract_n_function(self, video_id, player_url):
player_id = self._extract_player_info(player_url)
func_code = self._downloader.cache.load('youtube-nsig', player_id)
if func_code:
jsi = JSInterpreter(func_code)
else:
jscode = self._load_player(video_id, player_url)
funcname = self._extract_n_function_name(jscode)
jsi = JSInterpreter(jscode)
func_code = jsi.extract_function_code(funcname)
self._downloader.cache.store('youtube-nsig', player_id, func_code)
if self.get_param('youtube_print_sig_code'):
self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n')
return lambda s: jsi.extract_function_from_code(*func_code)([s])
def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
"""
Extract signatureTimestamp (sts)
Required to tell API what sig/player version is in use.
"""
sts = None
if isinstance(ytcfg, dict):
sts = int_or_none(ytcfg.get('STS'))
if not sts:
# Attempt to extract from player
if player_url is None:
error_msg = 'Cannot extract signature timestamp without player_url.'
if fatal:
raise ExtractorError(error_msg)
self.report_warning(error_msg)
return
code = self._load_player(video_id, player_url, fatal=fatal)
if code:
sts = int_or_none(self._search_regex(
r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
'JS player signature timestamp', group='sts', fatal=fatal))
return sts
def _mark_watched(self, video_id, player_responses):
playback_url = get_first(
player_responses, ('playbackTracking', 'videostatsPlaybackUrl', 'baseUrl'),
expected_type=url_or_none)
if not playback_url:
self.report_warning('Unable to mark watched')
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
    @staticmethod
    def _extract_urls(webpage):
        """Return all embedded-YouTube URLs/ids found in *webpage* HTML."""
        # Embedded YouTube player
        entries = [
            unescapeHTML(mobj.group('url'))
            for mobj in re.finditer(r'''(?x)
            (?:
                <iframe[^>]+?src=|
                data-video-url=|
                <embed[^>]+?src=|
                embedSWF\(?:\s*|
                <object[^>]+data=|
                new\s+SWFObject\(
            )
            (["\'])
            (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
            (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
            \1''', webpage)]
        # lazyYT YouTube embed
        entries.extend(list(map(
            unescapeHTML,
            re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
        # Wordpress "YouTube Video Importer" plugin
        matches = re.findall(r'''(?x)<div[^>]+
            class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
            data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
        entries.extend(m[-1] for m in matches)
        return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
return mobj.group('id')
def _extract_chapters_from_json(self, data, duration):
chapter_list = traverse_obj(
data, (
'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer',
'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
), expected_type=list)
return self._extract_chapters(
chapter_list,
chapter_time=lambda chapter: float_or_none(
traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
chapter_title=lambda chapter: traverse_obj(
chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
duration=duration)
def _extract_chapters_from_engagement_panel(self, data, duration):
content_list = traverse_obj(
data,
('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
expected_type=list, default=[])
chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
chapter_title = lambda chapter: self._get_text(chapter, 'title')
return next((
filter(None, (
self._extract_chapters(
traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
chapter_time, chapter_title, duration)
for contents in content_list
))), [])
def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration):
chapters = []
last_chapter = {'start_time': 0}
for idx, chapter in enumerate(chapter_list or []):
title = chapter_title(chapter)
start_time = chapter_time(chapter)
if start_time is None:
continue
last_chapter['end_time'] = start_time
if start_time < last_chapter['start_time']:
if idx == 1:
chapters.pop()
self.report_warning('Invalid start time for chapter "%s"' % last_chapter['title'])
else:
self.report_warning(f'Invalid start time for chapter "{title}"')
continue
last_chapter = {'start_time': start_time, 'title': title}
chapters.append(last_chapter)
last_chapter['end_time'] = duration
return chapters
def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
return self._parse_json(self._search_regex(
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
def _extract_comment(self, comment_renderer, parent=None):
    """Map a commentRenderer to a comment info-dict; None if it has no id."""
    comment_id = comment_renderer.get('commentId')
    if not comment_id:
        return
    # note: timestamp is an estimate calculated from the current time and time_text
    timestamp, time_text = self._extract_time_text(comment_renderer, 'publishedTimeText')
    vote_count = parse_count(try_get(
        comment_renderer,
        (lambda x: x['voteCount']['simpleText'], lambda x: x['likeCount']),
        compat_str))
    action_buttons = try_get(
        comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict)
    return {
        'id': comment_id,
        'text': self._get_text(comment_renderer, 'contentText'),
        'timestamp': timestamp,
        'time_text': time_text,
        'like_count': vote_count or 0,
        'is_favorited': 'creatorHeart' in (action_buttons or {}),
        'author': self._get_text(comment_renderer, 'authorText'),
        'author_id': try_get(
            comment_renderer, lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str),
        'author_thumbnail': try_get(
            comment_renderer, lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str),
        'author_is_uploader': try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool),
        'parent': parent or 'root'
    }
def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, tracker=None):
    """Generator of comment info-dicts for a video, recursing into reply threads.

    @param root_continuation_data  renderer holding the initial continuation
    @param ytcfg                   ytcfg used to generate API headers
    @param video_id                id of the video the comments belong to
    @param parent                  id of the parent comment when extracting a
                                   reply thread; None at the top level
    @param tracker                 counter dict shared across recursive calls;
                                   created on the first (top-level) call
    """
    # Read a single-valued extractor argument (e.g. comment_sort)
    get_single_config_arg = lambda c: self._configuration_arg(c, [''])[0]

    def extract_header(contents):
        # Parse the comments header of the first page: record the estimated
        # total in the tracker and return the continuation token for the
        # requested sort order (top/new)
        _continuation = None
        for content in contents:
            comments_header_renderer = traverse_obj(content, 'commentsHeaderRenderer')
            expected_comment_count = self._get_count(
                comments_header_renderer, 'countText', 'commentsCount')

            if expected_comment_count:
                tracker['est_total'] = expected_comment_count
                self.to_screen(f'Downloading ~{expected_comment_count} comments')
            comment_sort_index = int(get_single_config_arg('comment_sort') != 'top')  # 1 = new, 0 = top

            sort_menu_item = try_get(
                comments_header_renderer,
                lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
            sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}

            _continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
            if not _continuation:
                continue

            sort_text = str_or_none(sort_menu_item.get('title'))
            if not sort_text:
                sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
            self.to_screen('Sorting comments by %s' % sort_text.lower())
            break
        return _continuation

    def extract_thread(contents):
        # Yield the comments of one page; a bare `yield` (a falsy entry)
        # signals the caller that the max_parents limit has been reached
        if not parent:
            tracker['current_page_thread'] = 0
        for content in contents:
            if not parent and tracker['total_parent_comments'] >= max_parents:
                yield
            comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
            comment_renderer = get_first(
                (comment_thread_renderer, content), [['commentRenderer', ('comment', 'commentRenderer')]],
                expected_type=dict, default={})

            comment = self._extract_comment(comment_renderer, parent)
            if not comment:
                continue

            tracker['running_total'] += 1
            tracker['total_reply_comments' if parent else 'total_parent_comments'] += 1
            yield comment

            # Attempt to get the replies
            comment_replies_renderer = try_get(
                comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)

            if comment_replies_renderer:
                tracker['current_page_thread'] += 1
                comment_entries_iter = self._comment_entries(
                    comment_replies_renderer, ytcfg, video_id,
                    parent=comment.get('id'), tracker=tracker)
                # Replies are capped both per-thread and globally
                for reply_comment in itertools.islice(comment_entries_iter, min(max_replies_per_thread, max(0, max_replies - tracker['total_reply_comments']))):
                    yield reply_comment

    # Keeps track of counts across recursive calls
    if not tracker:
        tracker = dict(
            running_total=0,
            est_total=0,
            current_page_thread=0,
            total_parent_comments=0,
            total_reply_comments=0)

    # TODO: Deprecated
    # YouTube comments have a max depth of 2
    max_depth = int_or_none(get_single_config_arg('max_comment_depth'))
    if max_depth:
        self._downloader.deprecation_warning(
            '[youtube] max_comment_depth extractor argument is deprecated. Set max replies in the max-comments extractor argument instead.')
        if max_depth == 1 and parent:
            return

    # Unset limits default to sys.maxsize, i.e. effectively unlimited;
    # max_comments itself is enforced by the caller via islice
    max_comments, max_parents, max_replies, max_replies_per_thread, *_ = map(
        lambda p: int_or_none(p, default=sys.maxsize), self._configuration_arg('max_comments', ) + [''] * 4)

    continuation = self._extract_continuation(root_continuation_data)

    message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
    if message and not parent:
        self.report_warning(message, video_id=video_id)

    response = None
    is_first_continuation = parent is None

    # Page through continuations until exhausted
    for page_num in itertools.count(0):
        if not continuation:
            break
        headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=self._extract_visitor_data(response))
        comment_prog_str = f"({tracker['running_total']}/{tracker['est_total']})"
        if page_num == 0:
            if is_first_continuation:
                note_prefix = 'Downloading comment section API JSON'
            else:
                note_prefix = ' Downloading comment API JSON reply thread %d %s' % (
                    tracker['current_page_thread'], comment_prog_str)
        else:
            note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
                ' ' if parent else '', ' replies' if parent else '',
                page_num, comment_prog_str)

        response = self._extract_response(
            item_id=None, query=continuation,
            ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
            check_get_keys='onResponseReceivedEndpoints')

        continuation_contents = traverse_obj(
            response, 'onResponseReceivedEndpoints', expected_type=list, default=[])

        continuation = None
        for continuation_section in continuation_contents:
            continuation_items = traverse_obj(
                continuation_section,
                (('reloadContinuationItemsCommand', 'appendContinuationItemsAction'), 'continuationItems'),
                get_all=False, expected_type=list) or []
            if is_first_continuation:
                # The very first page carries the header (count + sort menu)
                continuation = extract_header(continuation_items)
                is_first_continuation = False
                if continuation:
                    break
                continue

            for entry in extract_thread(continuation_items):
                if not entry:
                    # Falsy entry -> parent-comment limit reached; stop entirely
                    return
                yield entry
            continuation = self._extract_continuation({'contents': continuation_items})
            if continuation:
                break
def _get_comments(self, ytcfg, video_id, contents, webpage):
    """Entry for comment extraction"""
    def _real_comment_extract(contents):
        # Find the first comment item section, if any, and delegate to
        # _comment_entries (which also handles a None renderer)
        section = None
        for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={}):
            if item.get('sectionIdentifier') == 'comment-item-section':
                section = item
                break
        yield from self._comment_entries(section, ytcfg, video_id)

    limit = int_or_none(self._configuration_arg('max_comments', [''])[0])
    return itertools.islice(_real_comment_extract(contents), 0, limit)
@staticmethod
def _get_checkok_params():
return {'contentCheckOk': True, 'racyCheckOk': True}
@classmethod
def _generate_player_context(cls, sts=None):
    """Build the playbackContext payload for a /player API request.

    @param sts  signature timestamp to include, if known
    """
    playback_ctx = {'html5Preference': 'HTML5_PREF_WANTS'}
    if sts is not None:
        playback_ctx['signatureTimestamp'] = sts
    result = {'playbackContext': {'contentPlaybackContext': playback_ctx}}
    result.update(cls._get_checkok_params())
    return result
@staticmethod
def _is_agegated(player_response):
    """Return True if the player response indicates an age-gated video."""
    if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
        return True

    AGE_GATE_MARKERS = (
        'confirm your age', 'age-restricted', 'inappropriate',  # reason
        'age_verification_required', 'age_check_required',  # status
    )
    reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
    return any(marker in reason for reason in reasons for marker in AGE_GATE_MARKERS)
@staticmethod
def _is_unplayable(player_response):
    """Return True if the player response reports the video as unplayable."""
    status = traverse_obj(player_response, ('playabilityStatus', 'status'))
    return status == 'UNPLAYABLE'
def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr):
    """Request the /player API response for the given innertube client."""
    # The signature timestamp is only extractable when a player URL is known
    sts = None
    if player_url:
        sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False)
    headers = self.generate_api_headers(
        ytcfg=player_ytcfg,
        account_syncid=self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr),
        session_index=self._extract_session_index(player_ytcfg, master_ytcfg),
        default_client=client)

    query = {'videoId': video_id, **self._generate_player_context(sts)}
    note = 'Downloading %s player API JSON' % client.replace('_', ' ').strip()
    return self._extract_response(
        item_id=video_id, ep='player', query=query,
        ytcfg=player_ytcfg, headers=headers, fatal=True,
        default_client=client, note=note) or None
def _get_requested_clients(self, url, smuggled_data):
    """Resolve the `player_client` extractor argument into a list of innertube clients."""
    default = ['android', 'web']
    # Public clients, highest priority first
    allowed_clients = sorted(
        (client for client in INNERTUBE_CLIENTS if client[:1] != '_'),
        key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True)

    requested_clients = []
    for client in self._configuration_arg('player_client'):
        if client in allowed_clients:
            requested_clients.append(client)
        elif client == 'default':
            requested_clients.extend(default)
        elif client == 'all':
            requested_clients.extend(allowed_clients)
        else:
            self.report_warning(f'Skipping unsupported client {client}')
    if not requested_clients:
        requested_clients = list(default)

    if smuggled_data.get('is_music_url') or self.is_music_url(url):
        # Also query the *_music variant of each requested client, where one exists
        requested_clients.extend(
            f'{client}_music' for client in requested_clients if f'{client}_music' in INNERTUBE_CLIENTS)

    return orderedSet(requested_clients)
def _extract_player_ytcfg(self, client, video_id):
    """Fetch the ytcfg for clients whose config lives on a separate page; {} otherwise."""
    config_pages = {
        'web_music': 'https://music.youtube.com',
        'web_embedded': f'https://www.youtube.com/embed/{video_id}?html5=1',
    }
    url = config_pages.get(client)
    if not url:
        return {}
    note = 'Downloading %s config' % client.replace('_', ' ').strip()
    webpage = self._download_webpage(url, video_id, fatal=False, note=note)
    return self.extract_ytcfg(video_id, webpage) or {}
def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg):
    """Download player API responses for each requested innertube client.

    Returns (prs, player_url): prs is a list of player-response dicts (which
    may include a formats-stripped copy of the webpage's initial response),
    player_url is the player JS URL if one was resolved.
    """
    initial_pr = None
    if webpage:
        initial_pr = self._extract_yt_initial_variable(
            webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
            video_id, 'initial player response')

    all_clients = set(clients)
    # Reversed so the list can be consumed LIFO via clients.pop() below,
    # preserving the original request order
    clients = clients[::-1]
    prs = []

    def append_client(*client_names):
        """ Append the first client name that exists but not already used """
        for client_name in client_names:
            actual_client = _split_innertube_client(client_name)[0]
            if actual_client in INNERTUBE_CLIENTS:
                if actual_client not in all_clients:
                    clients.append(client_name)
                    all_clients.add(actual_client)
                    return

    # Android player_response does not have microFormats which are needed for
    # extraction of some data. So we return the initial_pr with formats
    # stripped out even if not requested by the user
    # See: https://github.com/yt-dlp/yt-dlp/issues/501
    if initial_pr:
        pr = dict(initial_pr)
        pr['streamingData'] = None
        prs.append(pr)

    last_error = None
    tried_iframe_fallback = False
    player_url = None
    while clients:
        client, base_client, variant = _split_innertube_client(clients.pop())
        # Only the plain web client may reuse the page's ytcfg
        player_ytcfg = master_ytcfg if client == 'web' else {}
        if 'configs' not in self._configuration_arg('player_skip'):
            player_ytcfg = self._extract_player_ytcfg(client, video_id) or player_ytcfg

        player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage)
        require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER')
        if 'js' in self._configuration_arg('player_skip'):
            require_js_player = False
            player_url = None

        # Fall back to the embed iframe for the player URL, at most once
        if not player_url and not tried_iframe_fallback and require_js_player:
            player_url = self._download_player_url(video_id)
            tried_iframe_fallback = True

        try:
            pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response(
                client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr)
        except ExtractorError as e:
            # Remember the error; it is only raised if every client fails
            if last_error:
                self.report_warning(last_error)
            last_error = e
            continue

        if pr:
            prs.append(pr)

        # creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
        if variant == 'embedded' and self._is_unplayable(pr) and self.is_authenticated:
            append_client(f'{base_client}_creator')
        elif self._is_agegated(pr):
            if variant == 'tv_embedded':
                append_client(f'{base_client}_embedded')
            elif not variant:
                append_client(f'tv_embedded.{base_client}', f'{base_client}_embedded')

    if last_error:
        if not len(prs):
            raise last_error
        self.report_warning(last_error)
    return prs, player_url
def _extract_formats(self, streaming_data, video_id, player_url, is_live, duration):
    """Yield format dicts from the streamingData of all player responses.

    Covers direct https formats (including signatureCipher and "n"-parameter
    decryption) plus formats from HLS and DASH manifests.
    """
    itags, stream_ids = {}, []
    itag_qualities, res_qualities = {}, {}
    q = qualities([
        # Normally tiny is the smallest video-only formats. But
        # audio-only formats with unknown quality may get tagged as tiny
        'tiny',
        'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high',  # Audio only formats
        'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
    ])
    streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])

    for fmt in streaming_formats:
        if fmt.get('targetDurationSec'):
            continue

        itag = str_or_none(fmt.get('itag'))
        audio_track = fmt.get('audioTrack') or {}
        stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
        # Skip duplicates of the same itag/audio-track combination
        if stream_id in stream_ids:
            continue

        quality = fmt.get('quality')
        height = int_or_none(fmt.get('height'))
        if quality == 'tiny' or not quality:
            quality = fmt.get('audioQuality', '').lower() or quality
        # The 3gp format (17) in android client has a quality of "small",
        # but is actually worse than other formats
        if itag == '17':
            quality = 'tiny'
        if quality:
            if itag:
                itag_qualities[itag] = quality
            if height:
                res_qualities[height] = quality
        # FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
        # (adding `&sq=0` to the URL) and parsing emsg box to determine the
        # number of fragment that would subsequently requested with (`&sq=N`)
        if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
            continue

        fmt_url = fmt.get('url')
        if not fmt_url:
            # The URL is hidden inside signatureCipher and must be decrypted
            sc = compat_parse_qs(fmt.get('signatureCipher'))
            fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
            encrypted_sig = try_get(sc, lambda x: x['s'][0])
            if not (sc and fmt_url and encrypted_sig):
                continue
            # Decryption requires the player JS
            if not player_url:
                continue
            signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
            sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
            fmt_url += '&' + sp + '=' + signature

        query = parse_qs(fmt_url)
        throttled = False
        if query.get('n'):
            # The "n" parameter must be transformed via the player JS or
            # downloads of this format are throttled
            try:
                fmt_url = update_url_query(fmt_url, {
                    'n': self._decrypt_nsig(query['n'][0], video_id, player_url)})
            except ExtractorError as e:
                self.report_warning(
                    f'nsig extraction failed: You may experience throttling for some formats\n'
                    f'n = {query["n"][0]} ; player = {player_url}\n{e}', only_once=True)
                throttled = True

        if itag:
            itags[itag] = 'https'
            stream_ids.append(stream_id)

        tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
        language_preference = (
            10 if audio_track.get('audioIsDefault') and 10
            else -10 if 'descriptive' in (audio_track.get('displayName') or '').lower() and -10
            else -1)
        # Some formats may have much smaller duration than others (possibly damaged during encoding)
        # Eg: 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
        # Make sure to avoid false positives with small duration differences.
        # Eg: __2ABJjxzNo, ySuUZEjARPY
        is_damaged = try_get(fmt, lambda x: float(x['approxDurationMs']) / duration < 500)
        if is_damaged:
            self.report_warning(f'{video_id}: Some formats are possibly damaged. They will be deprioritized', only_once=True)
        dct = {
            'asr': int_or_none(fmt.get('audioSampleRate')),
            'filesize': int_or_none(fmt.get('contentLength')),
            'format_id': itag,
            'format_note': join_nonempty(
                '%s%s' % (audio_track.get('displayName') or '',
                          ' (default)' if language_preference > 0 else ''),
                fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
                throttled and 'THROTTLED', is_damaged and 'DAMAGED', delim=', '),
            'source_preference': -10 if throttled else -1,
            'fps': int_or_none(fmt.get('fps')) or None,
            'height': height,
            'quality': q(quality),
            'has_drm': bool(fmt.get('drmFamilies')),
            'tbr': tbr,
            'url': fmt_url,
            'width': int_or_none(fmt.get('width')),
            'language': join_nonempty(audio_track.get('id', '').split('.')[0],
                                      'desc' if language_preference < -1 else ''),
            'language_preference': language_preference,
            # Strictly de-prioritize damaged and 3gp formats
            'preference': -10 if is_damaged else -2 if itag == '17' else None,
        }
        mime_mobj = re.match(
            r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
        if mime_mobj:
            dct['ext'] = mimetype2ext(mime_mobj.group(1))
            dct.update(parse_codecs(mime_mobj.group(2)))

        no_audio = dct.get('acodec') == 'none'
        no_video = dct.get('vcodec') == 'none'
        if no_audio:
            dct['vbr'] = tbr
        if no_video:
            dct['abr'] = tbr
        if no_audio or no_video:
            dct['downloader_options'] = {
                # Youtube throttles chunks >~10M
                'http_chunk_size': 10485760,
            }
            if dct.get('ext'):
                dct['container'] = dct['ext'] + '_dash'
        yield dct

    live_from_start = is_live and self.get_param('live_from_start')
    skip_manifests = self._configuration_arg('skip')
    if not self.get_param('youtube_include_hls_manifest', True):
        skip_manifests.append('hls')
    get_dash = 'dash' not in skip_manifests and (
        not is_live or live_from_start or self._configuration_arg('include_live_dash'))
    get_hls = not live_from_start and 'hls' not in skip_manifests

    def process_manifest_format(f, proto, itag):
        # Deduplicate manifest formats against already-seen itags and assign
        # their quality; returns whether the format should be kept
        if itag in itags:
            if itags[itag] == proto or f'{itag}-{proto}' in itags:
                return False
            itag = f'{itag}-{proto}'
        if itag:
            f['format_id'] = itag
            itags[itag] = proto

        f['quality'] = next((
            q(qdict[val])
            for val, qdict in ((f.get('format_id', '').split('-')[0], itag_qualities), (f.get('height'), res_qualities))
            if val in qdict), -1)
        return True

    for sd in streaming_data:
        hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
        if hls_manifest_url:
            for f in self._extract_m3u8_formats(hls_manifest_url, video_id, 'mp4', fatal=False):
                if process_manifest_format(f, 'hls', self._search_regex(
                        r'/itag/(\d+)', f['url'], 'itag', default=None)):
                    yield f

        dash_manifest_url = get_dash and sd.get('dashManifestUrl')
        if dash_manifest_url:
            for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False):
                if process_manifest_format(f, 'dash', f['format_id']):
                    f['filesize'] = int_or_none(self._search_regex(
                        r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None))
                    if live_from_start:
                        f['is_from_start'] = True
                    yield f
def _extract_storyboard(self, player_responses, duration):
    """Yield mhtml storyboard (preview-thumbnail sheet) pseudo-formats.

    The storyboard spec is '|'-separated: the leading entry is the base URL
    template, each later entry describes one storyboard level as
    '#'-separated fields (width, height, frame count, columns, rows, ...,
    fragment name, signature).
    """
    # Reversed so the base URL (first entry) can be popped off the end
    spec = get_first(
        player_responses, ('storyboards', 'playerStoryboardSpecRenderer', 'spec'), default='').split('|')[::-1]
    base_url = url_or_none(urljoin('https://i.ytimg.com/', spec.pop() or None))
    if not base_url:
        return
    L = len(spec) - 1
    for i, args in enumerate(spec):
        args = args.split('#')
        counts = list(map(int_or_none, args[:5]))
        if len(args) != 8 or not all(counts):
            self.report_warning(f'Malformed storyboard {i}: {"#".join(args)}{bug_reports_message()}')
            continue

        width, height, frame_count, cols, rows = counts
        N, sigh = args[6:]

        # $L, $N and $M are placeholders in the storyboard URL template
        url = base_url.replace('$L', str(L - i)).replace('$N', N) + f'&sigh={sigh}'
        fragment_count = frame_count / (cols * rows)
        fragment_duration = duration / fragment_count
        yield {
            'format_id': f'sb{i}',
            'format_note': 'storyboard',
            'ext': 'mhtml',
            'protocol': 'mhtml',
            'acodec': 'none',
            'vcodec': 'none',
            'url': url,
            'width': width,
            'height': height,
            'fragments': [{
                'url': url.replace('$M', str(j)),
                'duration': min(fragment_duration, duration - (j * fragment_duration)),
            } for j in range(math.ceil(fragment_count))],
        }
def _download_player_responses(self, url, smuggled_data, video_id, webpage_url):
    """Download the watch page (unless skipped) and all player API responses."""
    webpage = None
    if 'webpage' not in self._configuration_arg('player_skip'):
        # bpctr/has_verified bypass some interstitial (content-check) screens
        watch_url = webpage_url + '&bpctr=9999999999&has_verified=1'
        webpage = self._download_webpage(watch_url, video_id, fatal=False)

    master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
    requested_clients = self._get_requested_clients(url, smuggled_data)
    player_responses, player_url = self._extract_player_responses(
        requested_clients, video_id, webpage, master_ytcfg)

    return webpage, master_ytcfg, player_responses, player_url
def _list_formats(self, video_id, microformats, video_details, player_responses, player_url, duration=None):
    """Gather live-broadcast details, live status, streaming data and formats."""
    broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
    # videoDetails.isLive takes precedence; fall back to liveBroadcastDetails.isLiveNow
    live_now = get_first(video_details, 'isLive')
    if live_now is None:
        live_now = get_first(broadcast_details, 'isLiveNow')

    streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
    all_formats = list(self._extract_formats(
        streaming_data, video_id, player_url, live_now, duration))

    return broadcast_details, live_now, streaming_data, all_formats
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
playability_statuses = traverse_obj(
player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
trailer_video_id = get_first(
playability_statuses,
('errorScreen', 'playerLegacyDesktopYpcTrailerRenderer', 'trailerVideoId'),
expected_type=str)
if trailer_video_id:
return self.url_result(
trailer_video_id, self.ie_key(), trailer_video_id)
search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
if webpage else (lambda x: None))
video_details = traverse_obj(
player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
microformats = traverse_obj(
player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
video_title = (
get_first(video_details, 'title')
or self._get_text(microformats, (..., 'title'))
or search_meta(['og:title', 'twitter:title', 'title']))
video_description = get_first(video_details, 'shortDescription')
multifeed_metadata_list = get_first(
player_responses,
('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
expected_type=str)
if multifeed_metadata_list and not smuggled_data.get('force_singlefeed'):
if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
else:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(
compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(
feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%swatch?v=%s' % (base_url, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(
entries, video_id, video_title, video_description)
duration = int_or_none(
get_first(video_details, 'lengthSeconds')
or get_first(microformats, 'lengthSeconds')
or parse_duration(search_meta('duration'))) or None
live_broadcast_details, is_live, streaming_data, formats = self._list_formats(
video_id, microformats, video_details, player_responses, player_url, duration)
if not formats:
if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
self.report_drm(video_id)
pemr = get_first(
playability_statuses,
('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {}
reason = self._get_text(pemr, 'reason') or get_first(playability_statuses, 'reason')
subreason = clean_html(self._get_text(pemr, 'subreason') or '')
if subreason:
if subreason == 'The uploader has not made this video available in your country.':
countries = get_first(microformats, 'availableCountries')
if not countries:
regions_allowed = search_meta('regionsAllowed')
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(subreason, countries, metadata_available=True)
reason += f'. {subreason}'
if reason:
self.raise_no_formats(reason, expected=True)
keywords = get_first(video_details, 'keywords', expected_type=list) or []
if not keywords and webpage:
keywords = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
for keyword in keywords:
if keyword.startswith('yt:stretch='):
mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
if mobj:
# NB: float is intentional for forcing float division
w, h = (float(v) for v in mobj.groups())
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
break
thumbnails = self._extract_thumbnails((video_details, microformats), (..., ..., 'thumbnail'))
thumbnail_url = search_meta(['og:image', 'twitter:image'])
if thumbnail_url:
thumbnails.append({
'url': thumbnail_url,
})
original_thumbnails = thumbnails.copy()
# The best resolution thumbnails sometimes does not appear in the webpage
# See: https://github.com/ytdl-org/youtube-dl/issues/29049, https://github.com/yt-dlp/yt-dlp/issues/340
# List of possible thumbnails - Ref: <https://stackoverflow.com/a/20542029>
thumbnail_names = [
'maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3',
'hqdefault', 'hq1', 'hq2', 'hq3', '0',
'mqdefault', 'mq1', 'mq2', 'mq3',
'default', '1', '2', '3'
]
n_thumbnail_names = len(thumbnail_names)
thumbnails.extend({
'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
video_id=video_id, name=name, ext=ext,
webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
} for name in thumbnail_names for ext in ('webp', 'jpg'))
for thumb in thumbnails:
i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
self._remove_duplicate_formats(thumbnails)
self._downloader._sort_thumbnails(original_thumbnails)
category = get_first(microformats, 'category') or search_meta('genre')
channel_id = str_or_none(
get_first(video_details, 'channelId')
or get_first(microformats, 'externalChannelId')
or search_meta('channelId'))
owner_profile_url = get_first(microformats, 'ownerProfileUrl')
live_content = get_first(video_details, 'isLiveContent')
is_upcoming = get_first(video_details, 'isUpcoming')
if is_live is None:
if is_upcoming or live_content is False:
is_live = False
if is_upcoming is None and (live_content or is_live):
is_upcoming = False
live_start_time = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
live_end_time = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
if not duration and live_end_time and live_start_time:
duration = live_end_time - live_start_time
if is_live and self.get_param('live_from_start'):
self._prepare_live_from_start_formats(formats, video_id, live_start_time, url, webpage_url, smuggled_data)
formats.extend(self._extract_storyboard(player_responses, duration))
# Source is given priority since formats that throttle are given lower source_preference
# When throttling issue is fully fixed, remove this
self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang', 'proto'))
info = {
'id': video_id,
'title': video_title,
'formats': formats,
'thumbnails': thumbnails,
# The best thumbnail that we are sure exists. Prevents unnecessary
# URL checking if user don't care about getting the best possible thumbnail
'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
'description': video_description,
'uploader': get_first(video_details, 'author'),
'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
'uploader_url': owner_profile_url,
'channel_id': channel_id,
'channel_url': format_field(channel_id, template='https://www.youtube.com/channel/%s'),
'duration': duration,
'view_count': int_or_none(
get_first((video_details, microformats), (..., 'viewCount'))
or search_meta('interactionCount')),
'average_rating': float_or_none(get_first(video_details, 'averageRating')),
'age_limit': 18 if (
get_first(microformats, 'isFamilySafe') is False
or search_meta('isFamilyFriendly') == 'false'
or search_meta('og:restrictions:age') == '18+') else 0,
'webpage_url': webpage_url,
'categories': [category] if category else None,
'tags': keywords,
'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
'is_live': is_live,
'was_live': (False if is_live or is_upcoming or live_content is False
else None if is_live is None or is_upcoming is None
else live_content),
'live_status': 'is_upcoming' if is_upcoming else None, # rest will be set by YoutubeDL
'release_timestamp': live_start_time,
}
pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
if pctr:
def get_lang_code(track):
return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
or track.get('languageCode'))
# Converted into dicts to remove duplicates
captions = {
get_lang_code(sub): sub
for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
translation_languages = {
lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
def process_language(container, base_url, lang_code, sub_name, query):
lang_subs = container.setdefault(lang_code, [])
for fmt in self._SUBTITLE_FORMATS:
query.update({
'fmt': fmt,
})
lang_subs.append({
'ext': fmt,
'url': urljoin('https://www.youtube.com', update_url_query(base_url, query)),
'name': sub_name,
})
subtitles, automatic_captions = {}, {}
for lang_code, caption_track in captions.items():
base_url = caption_track.get('baseUrl')
orig_lang = parse_qs(base_url).get('lang', [None])[-1]
if not base_url:
continue
lang_name = self._get_text(caption_track, 'name', max_runs=1)
if caption_track.get('kind') != 'asr':
if not lang_code:
continue
process_language(
subtitles, base_url, lang_code, lang_name, {})
if not caption_track.get('isTranslatable'):
continue
for trans_code, trans_name in translation_languages.items():
if not trans_code:
continue
orig_trans_code = trans_code
if caption_track.get('kind') != 'asr':
if 'translated_subs' in self._configuration_arg('skip'):
continue
trans_code += f'-{lang_code}'
trans_name += format_field(lang_name, template=' from %s')
# Add an "-orig" label to the original language so that it can be distinguished.
# The subs are returned without "-orig" as well for compatibility
if lang_code == f'a-{orig_trans_code}':
process_language(
automatic_captions, base_url, f'{trans_code}-orig', f'{trans_name} (Original)', {})
# Setting tlang=lang returns damaged subtitles.
process_language(automatic_captions, base_url, trans_code, trans_name,
{} if orig_lang == orig_trans_code else {'tlang': trans_code})
info['automatic_captions'] = automatic_captions
info['subtitles'] = subtitles
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
for k, v in query.items():
for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
d_k += '_time'
if d_k not in info and k in s_ks:
info[d_k] = parse_duration(query[k][0])
# Youtube Music Auto-generated description
if video_description:
mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
if mobj:
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = release_date[:4]
info.update({
'album': mobj.group('album'.strip()),
'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
'track': mobj.group('track').strip(),
'release_date': release_date,
'release_year': int_or_none(release_year),
})
initial_data = None
if webpage:
initial_data = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_DATA_RE, video_id,
'yt initial data')
if not initial_data:
query = {'videoId': video_id}
query.update(self._get_checkok_params())
initial_data = self._extract_response(
item_id=video_id, ep='next', fatal=False,
ytcfg=master_ytcfg, query=query,
headers=self.generate_api_headers(ytcfg=master_ytcfg),
note='Downloading initial data API JSON')
try:
# This will error if there is no livechat
initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
info.setdefault('subtitles', {})['live_chat'] = [{
'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies
'video_id': video_id,
'ext': 'json',
'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay',
}]
except (KeyError, IndexError, TypeError):
pass
if initial_data:
info['chapters'] = (
self._extract_chapters_from_json(initial_data, duration)
or self._extract_chapters_from_engagement_panel(initial_data, duration)
or None)
contents = traverse_obj(
initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents'),
expected_type=list, default=[])
vpir = get_first(contents, 'videoPrimaryInfoRenderer')
if vpir:
stl = vpir.get('superTitleLink')
if stl:
stl = self._get_text(stl)
if try_get(
vpir,
lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
info['location'] = stl
else:
mobj = re.search(r'(.+?)\s*S(\d+)\s*•\s*E(\d+)', stl)
if mobj:
info.update({
'series': mobj.group(1),
'season_number': int(mobj.group(2)),
'episode_number': int(mobj.group(3)),
})
for tlb in (try_get(
vpir,
lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
list) or []):
tbr = tlb.get('toggleButtonRenderer') or {}
for getter, regex in [(
lambda x: x['defaultText']['accessibility']['accessibilityData'],
r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
lambda x: x['accessibility'],
lambda x: x['accessibilityData']['accessibilityData'],
], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
label = (try_get(tbr, getter, dict) or {}).get('label')
if label:
mobj = re.match(regex, label)
if mobj:
info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
break
sbr_tooltip = try_get(
vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
if sbr_tooltip:
like_count, dislike_count = sbr_tooltip.split(' / ')
info.update({
'like_count': str_to_int(like_count),
'dislike_count': str_to_int(dislike_count),
})
vsir = get_first(contents, 'videoSecondaryInfoRenderer')
if vsir:
vor = traverse_obj(vsir, ('owner', 'videoOwnerRenderer'))
info.update({
'channel': self._get_text(vor, 'title'),
'channel_follower_count': self._get_count(vor, 'subscriberCountText')})
rows = try_get(
vsir,
lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
list) or []
multiple_songs = False
for row in rows:
if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
multiple_songs = True
break
for row in rows:
mrr = row.get('metadataRowRenderer') or {}
mrr_title = mrr.get('title')
if not mrr_title:
continue
mrr_title = self._get_text(mrr, 'title')
mrr_contents_text = self._get_text(mrr, ('contents', 0))
if mrr_title == 'License':
info['license'] = mrr_contents_text
elif not multiple_songs:
if mrr_title == 'Album':
info['album'] = mrr_contents_text
elif mrr_title == 'Artist':
info['artist'] = mrr_contents_text
elif mrr_title == 'Song':
info['track'] = mrr_contents_text
fallbacks = {
'channel': 'uploader',
'channel_id': 'uploader_id',
'channel_url': 'uploader_url',
}
# The upload date for scheduled, live and past live streams / premieres in microformats
# may be different from the stream date. Although not in UTC, we will prefer it in this case.
# See: https://github.com/yt-dlp/yt-dlp/pull/2223#issuecomment-1008485139
upload_date = (
unified_strdate(get_first(microformats, 'uploadDate'))
or unified_strdate(search_meta('uploadDate')))
if not upload_date or (not info.get('is_live') and not info.get('was_live') and info.get('live_status') != 'is_upcoming'):
upload_date = strftime_or_none(self._extract_time_text(vpir, 'dateText')[0], '%Y%m%d')
info['upload_date'] = upload_date
for to, frm in fallbacks.items():
if not info.get(to):
info[to] = info.get(frm)
for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
v = info.get(s_k)
if v:
info[d_k] = v
is_private = get_first(video_details, 'isPrivate', expected_type=bool)
is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool)
is_membersonly = None
is_premium = None
if initial_data and is_private is not None:
is_membersonly = False
is_premium = False
contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
badge_labels = set()
for content in contents:
if not isinstance(content, dict):
continue
badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer')))
for badge_label in badge_labels:
if badge_label.lower() == 'members only':
is_membersonly = True
elif badge_label.lower() == 'premium':
is_premium = True
elif badge_label.lower() == 'unlisted':
is_unlisted = True
info['availability'] = self._availability(
is_private=is_private,
needs_premium=is_premium,
needs_subscription=is_membersonly,
needs_auth=info['age_limit'] >= 18,
is_unlisted=None if is_private is None else is_unlisted)
info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage)
self.mark_watched(video_id, player_responses)
return info
class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
    @staticmethod
    def passthrough_smuggled_data(func):
        """Decorator for extractor entry points taking (self, url, smuggled_data).

        Unsmuggles the URL before calling *func* and, when any smuggled data
        was present, lazily re-smuggles it into the URL of every entry of the
        returned playlist.
        """
        def _smuggle(entries, smuggled_data):
            # Generator: re-attach the smuggled payload to each entry URL
            for entry in entries:
                # TODO: Convert URL to music.youtube instead.
                # Do we need to passthrough any other smuggled_data?
                entry['url'] = smuggle_url(entry['url'], smuggled_data)
                yield entry
        @functools.wraps(func)
        def wrapper(self, url):
            url, smuggled_data = unsmuggle_url(url, {})
            # Remember that the request originally came through music.youtube.com
            if self.is_music_url(url):
                smuggled_data['is_music_url'] = True
            info_dict = func(self, url, smuggled_data)
            if smuggled_data and info_dict.get('entries'):
                info_dict['entries'] = _smuggle(info_dict['entries'], smuggled_data)
            return info_dict
        return wrapper
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
channel_url, 'channel id')
@staticmethod
def _extract_basic_item_renderer(item):
# Modified from _extract_grid_item_renderer
known_basic_renderers = (
'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer', 'reelItemRenderer'
)
for key, renderer in item.items():
if not isinstance(renderer, dict):
continue
elif key in known_basic_renderers:
return renderer
elif key.startswith('grid') and key.endswith('Renderer'):
return renderer
    def _grid_entries(self, grid_renderer):
        """Yield entries for each item of a grid renderer.

        Each item is classified, in this order, as a playlist, a video, a
        channel or a generic navigation endpoint; unrecognized items are
        silently skipped.
        """
        for item in grid_renderer['items']:
            if not isinstance(item, dict):
                continue
            renderer = self._extract_basic_item_renderer(item)
            if not isinstance(renderer, dict):
                continue
            title = self._get_text(renderer, 'title')
            # playlist
            playlist_id = renderer.get('playlistId')
            if playlist_id:
                yield self.url_result(
                    'https://www.youtube.com/playlist?list=%s' % playlist_id,
                    ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
                    video_title=title)
                continue
            # video
            video_id = renderer.get('videoId')
            if video_id:
                yield self._extract_video(renderer)
                continue
            # channel
            channel_id = renderer.get('channelId')
            if channel_id:
                yield self.url_result(
                    'https://www.youtube.com/channel/%s' % channel_id,
                    ie=YoutubeTabIE.ie_key(), video_title=title)
                continue
            # generic endpoint URL support
            ep_url = urljoin('https://www.youtube.com/', try_get(
                renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
                compat_str))
            if ep_url:
                # Delegate to the first extractor that accepts the URL
                for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
                    if ie.suitable(ep_url):
                        yield self.url_result(
                            ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
                        break
    def _music_reponsive_list_entry(self, renderer):
        """Return a url_result for a musicResponsiveListItemRenderer, or None.

        Resolution order: direct video -> video inside a playlist ->
        bare playlist -> browse page.
        NOTE: the typo in the name ('reponsive') is kept intentionally;
        callers reference the method by this name.
        """
        video_id = traverse_obj(renderer, ('playlistItemData', 'videoId'))
        if video_id:
            return self.url_result(f'https://music.youtube.com/watch?v={video_id}',
                                   ie=YoutubeIE.ie_key(), video_id=video_id)
        playlist_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'playlistId'))
        if playlist_id:
            video_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'videoId'))
            if video_id:
                return self.url_result(f'https://music.youtube.com/watch?v={video_id}&list={playlist_id}',
                                       ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
            return self.url_result(f'https://music.youtube.com/playlist?list={playlist_id}',
                                   ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
        browse_id = traverse_obj(renderer, ('navigationEndpoint', 'browseEndpoint', 'browseId'))
        if browse_id:
            return self.url_result(f'https://music.youtube.com/browse/{browse_id}',
                                   ie=YoutubeTabIE.ie_key(), video_id=browse_id)
def _shelf_entries_from_content(self, shelf_renderer):
content = shelf_renderer.get('content')
if not isinstance(content, dict):
return
renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
if renderer:
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
# TODO: this includes only first N items
for entry in self._grid_entries(renderer):
yield entry
renderer = content.get('horizontalListRenderer')
if renderer:
# TODO
pass
def _shelf_entries(self, shelf_renderer, skip_channels=False):
ep = try_get(
shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str)
shelf_url = urljoin('https://www.youtube.com', ep)
if shelf_url:
# Skipping links to another channels, note that checking for
# endpoint.commandMetadata.webCommandMetadata.webPageTypwebPageType == WEB_PAGE_TYPE_CHANNEL
# will not work
if skip_channels and '/channels?' in shelf_url:
return
title = self._get_text(shelf_renderer, 'title')
yield self.url_result(shelf_url, video_title=title)
# Shelf may not contain shelf URL, fallback to extraction from content
for entry in self._shelf_entries_from_content(shelf_renderer):
yield entry
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _rich_entries(self, rich_grid_renderer):
renderer = try_get(
rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
video_id = renderer.get('videoId')
if not video_id:
return
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _hashtag_tile_entry(self, hashtag_tile_renderer):
url = urljoin('https://youtube.com', traverse_obj(
hashtag_tile_renderer, ('onTapCommand', 'commandMetadata', 'webCommandMetadata', 'url')))
if url:
return self.url_result(
url, ie=YoutubeTabIE.ie_key(), title=self._get_text(hashtag_tile_renderer, 'hashtag'))
    def _post_thread_entries(self, post_thread_renderer):
        """Yield entries from a community (backstage) post.

        Emits, in order: the attached video (if any), the attached playlist
        (if any) and any inline YouTube video links found in the post text.
        """
        post_renderer = try_get(
            post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
        if not post_renderer:
            return
        # video attachment
        video_renderer = try_get(
            post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
        video_id = video_renderer.get('videoId')
        if video_id:
            entry = self._extract_video(video_renderer)
            if entry:
                yield entry
        # playlist attachment
        playlist_id = try_get(
            post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
        if playlist_id:
            yield self.url_result(
                'https://www.youtube.com/playlist?list=%s' % playlist_id,
                ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
        # inline video links
        runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
        for run in runs:
            if not isinstance(run, dict):
                continue
            ep_url = try_get(
                run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
            if not ep_url:
                continue
            if not YoutubeIE.suitable(ep_url):
                continue
            ep_video_id = YoutubeIE._match_id(ep_url)
            # Skip the link when it points at the video already attached above
            if video_id == ep_video_id:
                continue
            yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
for entry in self._post_thread_entries(renderer):
yield entry
r''' # unused
def _rich_grid_entries(self, contents):
for content in contents:
video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
'''
    def _extract_entries(self, parent_renderer, continuation_list):
        """Yield entries from the contents of a sectionList/richGrid renderer.

        *continuation_list* is a single-element list used as an out-parameter:
        it is modified in-place so the caller can observe the continuation
        token chosen while this generator is being consumed lazily.
        """
        # continuation_list is modified in-place with continuation_list = [continuation_token]
        continuation_list[:] = [None]
        contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
        for content in contents:
            if not isinstance(content, dict):
                continue
            is_renderer = traverse_obj(
                content, 'itemSectionRenderer', 'musicShelfRenderer', 'musicShelfContinuation',
                expected_type=dict)
            if not is_renderer:
                renderer = content.get('richItemRenderer')
                if renderer:
                    for entry in self._rich_entries(renderer):
                        yield entry
                    continuation_list[0] = self._extract_continuation(parent_renderer)
                continue
            isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
            for isr_content in isr_contents:
                if not isinstance(isr_content, dict):
                    continue
                # Dispatch table: renderer key -> callable yielding entries
                known_renderers = {
                    'playlistVideoListRenderer': self._playlist_entries,
                    'gridRenderer': self._grid_entries,
                    'reelShelfRenderer': self._grid_entries,
                    'shelfRenderer': self._shelf_entries,
                    'musicResponsiveListItemRenderer': lambda x: [self._music_reponsive_list_entry(x)],
                    'backstagePostThreadRenderer': self._post_thread_entries,
                    'videoRenderer': lambda x: [self._video_entry(x)],
                    'playlistRenderer': lambda x: self._grid_entries({'items': [{'playlistRenderer': x}]}),
                    'channelRenderer': lambda x: self._grid_entries({'items': [{'channelRenderer': x}]}),
                    'hashtagTileRenderer': lambda x: [self._hashtag_tile_entry(x)]
                }
                for key, renderer in isr_content.items():
                    if key not in known_renderers:
                        continue
                    for entry in known_renderers[key](renderer):
                        if entry:
                            yield entry
                    continuation_list[0] = self._extract_continuation(renderer)
                    break
                # Fall back to the section's own continuation if none was found
                if not continuation_list[0]:
                    continuation_list[0] = self._extract_continuation(is_renderer)
        if not continuation_list[0]:
            continuation_list[0] = self._extract_continuation(parent_renderer)
    def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
        """Yield all entries of a tab, following API continuations until done.

        First extracts entries from the tab content embedded in the page,
        then repeatedly requests the `browse` continuation and dispatches
        on the renderer type returned.
        """
        continuation_list = [None]
        extract_entries = lambda x: self._extract_entries(x, continuation_list)
        tab_content = try_get(tab, lambda x: x['content'], dict)
        if not tab_content:
            return
        parent_renderer = (
            try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
            or try_get(tab_content, lambda x: x['richGridRenderer'], dict) or {})
        for entry in extract_entries(parent_renderer):
            yield entry
        continuation = continuation_list[0]
        for page_num in itertools.count(1):
            if not continuation:
                break
            headers = self.generate_api_headers(
                ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
            response = self._extract_response(
                item_id='%s page %s' % (item_id, page_num),
                query=continuation, headers=headers, ytcfg=ytcfg,
                check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
            if not response:
                break
            # Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
            # See: https://github.com/ytdl-org/youtube-dl/issues/28702
            visitor_data = self._extract_visitor_data(response) or visitor_data
            # Old-style continuations: renderer key -> callable yielding entries
            known_continuation_renderers = {
                'playlistVideoListContinuation': self._playlist_entries,
                'gridContinuation': self._grid_entries,
                'itemSectionContinuation': self._post_thread_continuation_entries,
                'sectionListContinuation': extract_entries,  # for feeds
            }
            continuation_contents = try_get(
                response, lambda x: x['continuationContents'], dict) or {}
            continuation_renderer = None
            for key, value in continuation_contents.items():
                if key not in known_continuation_renderers:
                    continue
                continuation_renderer = value
                continuation_list = [None]
                for entry in known_continuation_renderers[key](continuation_renderer):
                    yield entry
                continuation = continuation_list[0] or self._extract_continuation(continuation_renderer)
                break
            if continuation_renderer:
                continue
            # New-style continuations: renderer key -> (callable, wrapper key)
            known_renderers = {
                'videoRenderer': (self._grid_entries, 'items'),  # for membership tab
                'gridPlaylistRenderer': (self._grid_entries, 'items'),
                'gridVideoRenderer': (self._grid_entries, 'items'),
                'gridChannelRenderer': (self._grid_entries, 'items'),
                'playlistVideoRenderer': (self._playlist_entries, 'contents'),
                'itemSectionRenderer': (extract_entries, 'contents'),  # for feeds
                'richItemRenderer': (extract_entries, 'contents'),  # for hashtag
                'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
            }
            on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
            continuation_items = try_get(
                on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
            continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
            video_items_renderer = None
            for key, value in continuation_item.items():
                if key not in known_renderers:
                    continue
                # Re-wrap the items so the existing per-renderer helpers apply
                video_items_renderer = {known_renderers[key][1]: continuation_items}
                continuation_list = [None]
                for entry in known_renderers[key][0](video_items_renderer):
                    yield entry
                continuation = continuation_list[0] or self._extract_continuation(video_items_renderer)
                break
            if video_items_renderer:
                continue
            break
@staticmethod
def _extract_selected_tab(tabs, fatal=True):
for tab in tabs:
renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
if renderer.get('selected') is True:
return renderer
else:
if fatal:
raise ExtractorError('Unable to find selected tab')
def _extract_uploader(self, data):
uploader = {}
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
owner = try_get(
renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
if owner:
owner_text = owner.get('text')
uploader['uploader'] = self._search_regex(
r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text)
uploader['uploader_id'] = try_get(
owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
uploader['uploader_url'] = urljoin(
'https://www.youtube.com/',
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return {k: v for k, v in uploader.items() if v is not None}
    def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
        """Build a playlist result for a tab page (channel, playlist, hashtag).

        Collects metadata from the channel/playlist metadata renderers and the
        sidebar, then lazily extracts the entries of the selected tab.
        """
        playlist_id = title = description = channel_url = channel_name = channel_id = None
        tags = []
        selected_tab = self._extract_selected_tab(tabs)
        primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
        renderer = try_get(
            data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
        if renderer:
            channel_name = renderer.get('title')
            channel_url = renderer.get('channelUrl')
            channel_id = renderer.get('externalId')
        else:
            renderer = try_get(
                data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
        # Applies to whichever metadata renderer was found above; for channel
        # pages this makes the channel id double as the playlist id
        if renderer:
            title = renderer.get('title')
            description = renderer.get('description', '')
            playlist_id = channel_id
            tags = renderer.get('keywords', '').split()
        # We can get the uncropped banner/avatar by replacing the crop params with '=s0'
        # See: https://github.com/yt-dlp/yt-dlp/issues/2237#issuecomment-1013694714
        def _get_uncropped(url):
            return url_or_none((url or '').split('=')[0] + '=s0')
        avatar_thumbnails = self._extract_thumbnails(renderer, 'avatar')
        if avatar_thumbnails:
            uncropped_avatar = _get_uncropped(avatar_thumbnails[0]['url'])
            if uncropped_avatar:
                avatar_thumbnails.append({
                    'url': uncropped_avatar,
                    'id': 'avatar_uncropped',
                    'preference': 1
                })
        channel_banners = self._extract_thumbnails(
            data, ('header', ..., ['banner', 'mobileBanner', 'tvBanner']))
        for banner in channel_banners:
            banner['preference'] = -10
        if channel_banners:
            uncropped_banner = _get_uncropped(channel_banners[0]['url'])
            if uncropped_banner:
                channel_banners.append({
                    'url': uncropped_banner,
                    'id': 'banner_uncropped',
                    'preference': -5
                })
        primary_thumbnails = self._extract_thumbnails(
            primary_sidebar_renderer, ('thumbnailRenderer', ('playlistVideoThumbnailRenderer', 'playlistCustomThumbnailRenderer'), 'thumbnail'))
        if playlist_id is None:
            playlist_id = item_id
        playlist_stats = traverse_obj(primary_sidebar_renderer, 'stats')
        last_updated_unix, _ = self._extract_time_text(playlist_stats, 2)
        if title is None:
            title = self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) or playlist_id
        title += format_field(selected_tab, 'title', ' - %s')
        title += format_field(selected_tab, 'expandedText', ' - %s')
        metadata = {
            'playlist_id': playlist_id,
            'playlist_title': title,
            'playlist_description': description,
            'uploader': channel_name,
            'uploader_id': channel_id,
            'uploader_url': channel_url,
            'thumbnails': primary_thumbnails + avatar_thumbnails + channel_banners,
            'tags': tags,
            'view_count': self._get_count(playlist_stats, 1),
            'availability': self._extract_availability(data),
            'modified_date': strftime_or_none(last_updated_unix, '%Y%m%d'),
            'playlist_count': self._get_count(playlist_stats, 0),
            'channel_follower_count': self._get_count(data, ('header', ..., 'subscriberCountText')),
        }
        # Fall back to sidebar owner info when channel metadata was absent
        if not channel_id:
            metadata.update(self._extract_uploader(data))
        metadata.update({
            'channel': metadata['uploader'],
            'channel_id': metadata['uploader_id'],
            'channel_url': metadata['uploader_url']})
        return self.playlist_result(
            self._entries(
                selected_tab, playlist_id, ytcfg,
                self._extract_account_syncid(ytcfg, data),
                self._extract_visitor_data(data, ytcfg)),
            **metadata)
def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
first_id = last_id = response = None
for page_num in itertools.count(1):
videos = list(self._playlist_entries(playlist))
if not videos:
return
start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
if start >= len(videos):
return
for video in videos[start:]:
if video['id'] == first_id:
self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
return
yield video
first_id = first_id or videos[0]['id']
last_id = videos[-1]['id']
watch_endpoint = try_get(
playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
visitor_data=self._extract_visitor_data(response, data, ytcfg))
query = {
'playlistId': playlist_id,
'videoId': watch_endpoint.get('videoId') or last_id,
'index': watch_endpoint.get('index') or len(videos),
'params': watch_endpoint.get('params') or 'OAE%3D'
}
response = self._extract_response(
item_id='%s page %d' % (playlist_id, page_num),
query=query, ep='next', headers=headers, ytcfg=ytcfg,
check_get_keys='contents'
)
playlist = try_get(
response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
# Delegating everything except mix playlists to regular tab-based playlist URL
playlist_url = urljoin(url, try_get(
playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if playlist_url and playlist_url != url:
return self.url_result(
playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
return self.playlist_result(
self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
playlist_id=playlist_id, playlist_title=title)
    def _extract_availability(self, data):
        """
        Gets the availability of a given playlist/tab.
        Note: Unless YouTube tells us explicitly, we do not assume it is public
        @param data: response
        """
        is_private = is_unlisted = None
        renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
        badge_labels = self._extract_badges(renderer)
        # Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge
        privacy_dropdown_entries = try_get(
            renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or []
        for renderer_dict in privacy_dropdown_entries:
            is_selected = try_get(
                renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False
            if not is_selected:
                continue
            label = self._get_text(renderer_dict, ('privacyDropdownItemRenderer', 'label'))
            if label:
                badge_labels.add(label.lower())
                break
        # Map the collected badge/dropdown labels onto the privacy flags
        for badge_label in badge_labels:
            if badge_label == 'unlisted':
                is_unlisted = True
            elif badge_label == 'private':
                is_private = True
            elif badge_label == 'public':
                is_unlisted = is_private = False
        return self._availability(is_private, False, False, False, is_unlisted)
@staticmethod
def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
sidebar_renderer = try_get(
data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) or []
for item in sidebar_renderer:
renderer = try_get(item, lambda x: x[info_renderer], expected_type)
if renderer:
return renderer
    def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
        """
        Get playlist with unavailable videos if the 'show unavailable videos' button exists.
        Returns the reloaded API response, or None when the sidebar renderer is missing.
        """
        browse_id = params = None
        renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
        if not renderer:
            return
        # Look for the 'show unavailable videos' menu entry to pick up its
        # browse id/params; fall back to defaults below when not found
        menu_renderer = try_get(
            renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
        for menu_item in menu_renderer:
            if not isinstance(menu_item, dict):
                continue
            nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
            text = try_get(
                nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
            if not text or text.lower() != 'show unavailable videos':
                continue
            browse_endpoint = try_get(
                nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
            browse_id = browse_endpoint.get('browseId')
            params = browse_endpoint.get('params')
            break
        headers = self.generate_api_headers(
            ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
            visitor_data=self._extract_visitor_data(data, ytcfg))
        query = {
            'params': params or 'wgYCCAA=',
            'browseId': browse_id or 'VL%s' % item_id
        }
        return self._extract_response(
            item_id=item_id, headers=headers, query=query,
            check_get_keys='contents', fatal=False, ytcfg=ytcfg,
            note='Downloading API JSON with unavailable videos')
    def _extract_webpage(self, url, item_id, fatal=True):
        """Download a tab webpage and its ytInitialData, with retries.

        Returns (webpage, data). Retries up to the 'extractor_retries'
        parameter when the returned ytInitialData is incomplete. When
        *fatal* is False, errors are reported as warnings and partial
        results may be returned.
        """
        retries = self.get_param('extractor_retries', 3)
        count = -1
        webpage = data = last_error = None
        while count < retries:
            count += 1
            # Sometimes youtube returns a webpage with incomplete ytInitialData
            # See: https://github.com/yt-dlp/yt-dlp/issues/116
            if last_error:
                self.report_warning('%s. Retrying ...' % last_error)
            try:
                webpage = self._download_webpage(
                    url, item_id,
                    note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
                data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
            except ExtractorError as e:
                if isinstance(e.cause, network_exceptions):
                    # 403/429 are not transient; anything else is retried
                    if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
                        last_error = error_to_compat_str(e.cause or e.msg)
                        if count < retries:
                            continue
                if fatal:
                    raise
                self.report_warning(error_to_compat_str(e))
                break
            else:
                try:
                    self._extract_and_report_alerts(data)
                except ExtractorError as e:
                    if fatal:
                        raise
                    self.report_warning(error_to_compat_str(e))
                    break
                # Data looks complete when any of these top-level keys exists
                if dict_get(data, ('contents', 'currentVideoEndpoint', 'onResponseReceivedActions')):
                    break
                last_error = 'Incomplete yt initial data received'
                if count >= retries:
                    if fatal:
                        raise ExtractorError(last_error)
                    self.report_warning(last_error)
                    break
        return webpage, data
def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
    """Obtain tab-page data, preferring the webpage and falling back to the
    InnerTube API endpoint.

    @param url             URL of the tab page
    @param item_id         Id used for logging and API requests
    @param ytcfg           Optional pre-extracted ytcfg; re-extracted from the
                           webpage when not given
    @param fatal           Whether extraction failures should raise
    @param webpage_fatal   Whether a failed webpage download should raise
    @param default_client  InnerTube client to use for API fallback
    @returns               (data, ytcfg)
    """
    data = None
    if 'webpage' not in self._configuration_arg('skip'):
        webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
        ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
        # Reject webpage data if redirected to home page without explicitly requesting
        selected_tab = self._extract_selected_tab(traverse_obj(
            data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[]), fatal=False) or {}
        if (url != 'https://www.youtube.com/feed/recommended'
                and selected_tab.get('tabIdentifier') == 'FEwhat_to_watch'  # Home page
                and 'no-youtube-channel-redirect' not in self.get_param('compat_opts', [])):
            msg = 'The channel/playlist does not exist and the URL redirected to youtube.com home page'
            if fatal:
                raise ExtractorError(msg, expected=True)
            self.report_warning(msg, only_once=True)
    if not data:
        # API fallback. Without a ytcfg, authenticated requests may not carry
        # the right credentials, so warn (or raise) before proceeding.
        if not ytcfg and self.is_authenticated:
            msg = 'Playlists that require authentication may not extract correctly without a successful webpage download.'
            if 'authcheck' not in self._configuration_arg('skip') and fatal:
                raise ExtractorError(
                    msg + ' If you are not downloading private content, or your cookies are only for the first account and channel,'
                          ' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
                    expected=True)
            self.report_warning(msg, only_once=True)
        data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
    return data, ytcfg
def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
    """Resolve *url* through the InnerTube ``navigation/resolve_url`` endpoint
    and download the API JSON for the resolved browse/watch endpoint.

    Returns the endpoint's API response, or None when resolution fails and
    *fatal* is false (raises ExtractorError when *fatal* is true).
    """
    api_headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
    resolved = self._extract_response(
        item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=api_headers, ytcfg=ytcfg, fatal=fatal,
        ep='navigation/resolve_url', note='Downloading API parameters API JSON', default_client=default_client)
    # A resolved URL is either a channel/playlist page ("browse") or a watch page ("next")
    for endpoint_key, api_ep in (('browseEndpoint', 'browse'), ('watchEndpoint', 'next')):
        endpoint_params = try_get(resolved, lambda x: x['endpoint'][endpoint_key], dict)
        if not endpoint_params:
            continue
        return self._extract_response(
            item_id=item_id, query=endpoint_params, ep=api_ep, headers=api_headers,
            ytcfg=ytcfg, fatal=fatal, default_client=default_client,
            check_get_keys=('contents', 'currentVideoEndpoint', 'onResponseReceivedActions'))
    err_note = 'Failed to resolve url (does the playlist exist?)'
    if fatal:
        raise ExtractorError(err_note, expected=True)
    self.report_warning(err_note, item_id)
# Extra 'params' value appended to every search API request; subclasses set
# this to pre-filter results (presumably an opaque serialized filter — TODO confirm)
_SEARCH_PARAMS = None

def _search_results(self, query, params=NO_DEFAULT, default_client='web'):
    """Yield search-result entries for *query*, paging through continuations.

    @param query           Search query string
    @param params          Search filter params; defaults to _SEARCH_PARAMS
    @param default_client  InnerTube client used for the search requests
    """
    data = {'query': query}
    if params is NO_DEFAULT:
        params = self._SEARCH_PARAMS
    if params:
        data['params'] = params
    # Possible locations of the result list in the API response:
    # initial web page, web continuation, ytmusic page, ytmusic continuation
    content_keys = (
        ('contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'sectionListRenderer', 'contents'),
        ('onResponseReceivedCommands', 0, 'appendContinuationItemsAction', 'continuationItems'),
        # ytmusic search
        ('contents', 'tabbedSearchResultsRenderer', 'tabs', 0, 'tabRenderer', 'content', 'sectionListRenderer', 'contents'),
        ('continuationContents', ),
    )
    check_get_keys = tuple(set(keys[0] for keys in content_keys))

    # Single-element cell; _extract_entries stores the next continuation
    # query in continuation_list[0] (None when there are no more pages)
    continuation_list = [None]
    for page_num in itertools.count(1):
        data.update(continuation_list[0] or {})
        search = self._extract_response(
            item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
            default_client=default_client, check_get_keys=check_get_keys)
        slr_contents = traverse_obj(search, *content_keys)
        yield from self._extract_entries({'contents': list(variadic(slr_contents))}, continuation_list)
        if not continuation_list[0]:
            break
class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube Tabs'
_VALID_URL = r'''(?x:
https?://
(?:\w+\.)?
(?:
youtube(?:kids)?\.com|
%(invidious)s
)/
(?:
(?P<channel_type>channel|c|user|browse)/|
(?P<not_channel>
feed/|hashtag/|
(?:playlist|watch)\?.*?\blist=
)|
(?!(?:%(reserved_names)s)\b) # Direct URLs
)
(?P<id>[^/?\#&]+)
)''' % {
'reserved_names': YoutubeBaseInfoExtractor._RESERVED_NAMES,
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
IE_NAME = 'youtube:tab'
_TESTS = [{
'note': 'playlists, multipage',
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader': 'Igor Kleiner',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, multipage, different order',
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'uploader': 'Igor Kleiner',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, series',
'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
'playlist_mincount': 5,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Playlists',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'uploader': '3Blue1Brown',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel_follower_count': int
},
}, {
'note': 'playlists, singlepage',
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'title': 'ThirstForScience - Playlists',
'description': 'md5:609399d937ea957b0f53cbffb747a14c',
'uploader': 'ThirstForScience',
'uploader_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'uploader_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'tags': 'count:13',
'channel': 'ThirstForScience',
'channel_follower_count': int
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
'note': 'basic, single video playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
'description': '',
'tags': [],
'view_count': int,
'modified_date': '20201130',
'channel': 'Sergey M.',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 1,
}, {
'note': 'empty playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
'tags': [],
'channel': 'Sergey M.',
'description': '',
'modified_date': '20160902',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 0,
}, {
'note': 'Home tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 2,
}, {
'note': 'Videos tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_follower_count': int
},
'playlist_mincount': 975,
}, {
'note': 'Videos tab, sorted by popular',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 199,
}, {
'note': 'Playlists tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 17,
}, {
'note': 'Community tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 18,
}, {
'note': 'Channels tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 12,
}, {
'note': 'Search tab',
'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
'playlist_mincount': 40,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Search - linear algebra',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader': '3Blue1Brown',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_follower_count': int
},
}, {
'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'description': 'md5:a14dc1a8ef8307a9807fe136a0660268',
'tags': [],
'uploader_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'view_count': int,
'modified_date': '20150605',
'channel_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'channel_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'channel': 'Christiaan008',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
'channel_url': 'https://www.youtube.com/c/Cauchemar89',
'tags': [],
'modified_date': r're:\d{8}',
'channel': 'Cauchemar',
'uploader_url': 'https://www.youtube.com/c/Cauchemar89',
'view_count': int,
'description': '',
'channel_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 1123,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'even larger playlist, 8832 videos',
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'uploader_url': 'https://www.youtube.com/c/InterstellarMovie',
'tags': [],
'view_count': int,
'channel_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'channel_url': 'https://www.youtube.com/c/InterstellarMovie',
'channel': 'Interstellar Movie',
'description': '',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 21,
}, {
'note': 'Playlist with "show unavailable videos" button',
'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
'info_dict': {
'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
'uploader': 'Phim Siêu Nhân Nhật Bản',
'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'view_count': int,
'channel': 'Phim Siêu Nhân Nhật Bản',
'tags': [],
'uploader_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'channel_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 200,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Playlist with unavailable videos in page 7',
'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
'info_dict': {
'title': 'Uploads from BlankTV',
'id': 'UU8l9frL61Yl5KFOl87nIm2w',
'uploader': 'BlankTV',
'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'channel': 'BlankTV',
'channel_url': 'https://www.youtube.com/c/blanktv',
'channel_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'view_count': int,
'tags': [],
'uploader_url': 'https://www.youtube.com/c/blanktv',
'modified_date': r're:\d{8}',
'description': '',
},
'playlist_mincount': 1000,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844',
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'uploader': 'Computerphile',
'description': 'md5:7f567c574d13d3f8c0954d9ffee4e487',
'uploader_url': 'https://www.youtube.com/user/Computerphile',
'tags': [],
'view_count': int,
'channel_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'channel_url': 'https://www.youtube.com/user/Computerphile',
'channel': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'only_matching': True,
}, {
'note': 'Playlist URL that does not actually serve a playlist',
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
'info_dict': {
'id': 'GgL890LIznQ', # This will keep changing
'ext': 'mp4',
'title': str,
'uploader': 'Sky News',
'uploader_id': 'skynews',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
'upload_date': r're:\d{8}',
'description': str,
'categories': ['News & Politics'],
'tags': list,
'like_count': int,
'release_timestamp': 1642502819,
'channel': 'Sky News',
'channel_id': 'UCoMdktPbSTixAyNGwb-UYkQ',
'age_limit': 0,
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/GgL890LIznQ/maxresdefault_live.jpg',
'playable_in_embed': True,
'release_date': '20220118',
'availability': 'public',
'live_status': 'is_live',
'channel_url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Ignoring subtitle tracks found in '],
}, {
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
},
'params': {
'skip_download': True,
},
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'note': 'A channel that is not live. Should raise error',
'url': 'https://www.youtube.com/user/numberphile/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/trending',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/library',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/history',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/subscriptions',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}, {
'note': 'Recommended - redirects to home page.',
'url': 'https://www.youtube.com/feed/recommended',
'only_matching': True,
}, {
'note': 'inline playlist with not always working continuations',
'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/course',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/zsecurity',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/NASAgovVideo/videos',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/hashtag/cctv9',
'info_dict': {
'id': 'cctv9',
'title': '#cctv9',
'tags': [],
},
'playlist_mincount': 350,
}, {
'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
'only_matching': True,
}, {
'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist',
'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'only_matching': True
}, {
'note': '/browse/ should redirect to /channel/',
'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng',
'only_matching': True
}, {
'note': 'VLPL, should redirect to playlist?list=PL...',
'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'info_dict': {
'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'uploader': 'NoCopyrightSounds',
'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'title': 'NCS Releases',
'uploader_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'channel_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'modified_date': r're:\d{8}',
'view_count': int,
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'tags': [],
'channel': 'NoCopyrightSounds',
},
'playlist_mincount': 166,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'tags': [],
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'modified_date': r're:\d{8}',
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
},
'expected_warnings': [
'The URL does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
}, {
'note': 'Topic without a UU playlist',
'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
'info_dict': {
'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
'tags': [],
},
'expected_warnings': [
'the playlist redirect gave error',
],
'playlist_mincount': 9,
}, {
'note': 'Youtube music Album',
'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE',
'info_dict': {
'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
'tags': [],
'view_count': int,
'description': '',
'availability': 'unlisted',
'modified_date': r're:\d{8}',
},
'playlist_count': 50,
}, {
'note': 'unlisted single video playlist',
'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'info_dict': {
'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'uploader': 'colethedj',
'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'title': 'yt-dlp unlisted playlist test',
'availability': 'unlisted',
'tags': [],
'modified_date': '20211208',
'channel': 'colethedj',
'view_count': int,
'description': '',
'uploader_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
},
'playlist_count': 1,
}, {
'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData',
'url': 'https://www.youtube.com/feed/recommended',
'info_dict': {
'id': 'recommended',
'title': 'recommended',
'tags': [],
},
'playlist_mincount': 50,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: /videos tab, sorted by oldest first',
'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid',
'info_dict': {
'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'title': 'Cody\'sLab - Videos',
'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa',
'uploader': 'Cody\'sLab',
'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel': 'Cody\'sLab',
'channel_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'uploader_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel_follower_count': int
},
'playlist_mincount': 650,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'modified_date': r're:\d{8}',
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'tags': [],
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
},
'expected_warnings': [
'does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'non-standard redirect to regional channel',
'url': 'https://www.youtube.com/channel/UCwVVpHQ2Cs9iGJfpdFngePQ',
'only_matching': True
}, {
'note': 'collaborative playlist (uploader name in the form "by <uploader> and x other(s)")',
'url': 'https://www.youtube.com/playlist?list=PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'info_dict': {
'id': 'PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'modified_date': '20220407',
'channel_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
'tags': [],
'uploader_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'uploader': 'pukkandan',
'availability': 'unlisted',
'channel_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'channel': 'pukkandan',
'description': 'Test for collaborative playlist',
'title': 'yt-dlp test - collaborative playlist',
'uploader_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
},
'playlist_mincount': 2
}]
@classmethod
def suitable(cls, url):
    """Reject URLs handled by YoutubeIE; otherwise fall back to the regex match."""
    if YoutubeIE.suitable(url):
        return False
    return super(YoutubeTabIE, cls).suitable(url)
_URL_RE = re.compile(rf'(?P<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/\w+))?(?P<post>.*)$')
@YoutubeTabBaseInfoExtractor.passthrough_smuggled_data
def _real_extract(self, url, smuggled_data):
    """Extract a channel/playlist/feed tab page.

    Normalizes the URL (host, music redirects, default tab), downloads the
    tab data (webpage with API fallback), follows regional/tab redirects,
    and dispatches to tab/playlist/single-video extraction.
    """
    item_id = self._match_id(url)
    # Normalize host so all later URL handling sees www.youtube.com
    url = compat_urlparse.urlunparse(
        compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
    compat_opts = self.get_param('compat_opts', [])

    def get_mobj(url):
        # Match groupdict with missing groups coerced to '' for easy joining
        mobj = self._URL_RE.match(url).groupdict()
        mobj.update((k, '') for k, v in mobj.items() if v is None)
        return mobj

    mobj, redirect_warning = get_mobj(url), None
    # Youtube returns incomplete data if tabname is not lower case
    pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
    if is_channel:
        if smuggled_data.get('is_music_url'):
            if item_id[:2] == 'VL':  # Youtube music VL channels have an equivalent playlist
                item_id = item_id[2:]
                pre, tab, post, is_channel = f'https://www.youtube.com/playlist?list={item_id}', '', '', False
            elif item_id[:2] == 'MP':  # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
                mdata = self._extract_tab_endpoint(
                    f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
                murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
                                    get_all=False, expected_type=compat_str)
                if not murl:
                    raise ExtractorError('Failed to resolve album to playlist')
                return self.url_result(murl, ie=YoutubeTabIE.ie_key())
            elif mobj['channel_type'] == 'browse':  # Youtube music /browse/ should be changed to /channel/
                pre = f'https://www.youtube.com/channel/{item_id}'

    original_tab_name = tab
    if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
        # Home URLs should redirect to /videos/
        redirect_warning = ('A channel/user page was given. All the channel\'s videos will be downloaded. '
                            'To download only the videos in the home page, add a "/featured" to the URL')
        tab = '/videos'

    url = ''.join((pre, tab, post))
    mobj = get_mobj(url)

    # Handle both video/playlist URLs
    qs = parse_qs(url)
    video_id, playlist_id = [qs.get(key, [None])[0] for key in ('v', 'list')]

    if not video_id and mobj['not_channel'].startswith('watch'):
        if not playlist_id:
            # If there is neither video or playlist ids, youtube redirects to home page, which is undesirable
            raise ExtractorError('Unable to recognize tab page')
        # Common mistake: https://www.youtube.com/watch?list=playlist_id
        self.report_warning(f'A video URL was given without video ID. Trying to download playlist {playlist_id}')
        url = f'https://www.youtube.com/playlist?list={playlist_id}'
        mobj = get_mobj(url)

    if video_id and playlist_id:
        if self.get_param('noplaylist'):
            self.to_screen(f'Downloading just video {video_id} because of --no-playlist')
            return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
                                   ie=YoutubeIE.ie_key(), video_id=video_id)
        self.to_screen(f'Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}')

    data, ytcfg = self._extract_data(url, item_id)

    # YouTube may provide a non-standard redirect to the regional channel
    # See: https://github.com/yt-dlp/yt-dlp/issues/2694
    redirect_url = traverse_obj(
        data, ('onResponseReceivedActions', ..., 'navigateAction', 'endpoint', 'commandMetadata', 'webCommandMetadata', 'url'), get_all=False)
    if redirect_url and 'no-youtube-channel-redirect' not in compat_opts:
        redirect_url = ''.join((
            urljoin('https://www.youtube.com', redirect_url), mobj['tab'], mobj['post']))
        self.to_screen(f'This playlist is likely not available in your region. Following redirect to regional playlist {redirect_url}')
        return self.url_result(redirect_url, ie=YoutubeTabIE.ie_key())

    tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
    if tabs:
        selected_tab = self._extract_selected_tab(tabs)
        selected_tab_name = selected_tab.get('title', '').lower()
        if selected_tab_name == 'home':
            selected_tab_name = 'featured'
        requested_tab_name = mobj['tab'][1:]
        if 'no-youtube-channel-redirect' not in compat_opts:
            if requested_tab_name == 'live':
                # Live tab should have redirected to the video
                raise ExtractorError('The channel is not currently live', expected=True)
            if requested_tab_name not in ('', selected_tab_name):
                redirect_warning = f'The channel does not have a {requested_tab_name} tab'
                if not original_tab_name:
                    if item_id[:2] == 'UC':
                        # Topic channels don't have /videos. Use the equivalent playlist instead
                        pl_id = f'UU{item_id[2:]}'
                        pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
                        try:
                            data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
                        except ExtractorError:
                            redirect_warning += ' and the playlist redirect gave error'
                        else:
                            item_id, url, selected_tab_name = pl_id, pl_url, requested_tab_name
                            redirect_warning += f'. Redirecting to playlist {pl_id} instead'
                    if selected_tab_name and selected_tab_name != requested_tab_name:
                        redirect_warning += f'. {selected_tab_name} tab is being downloaded instead'
                else:
                    raise ExtractorError(redirect_warning, expected=True)

    if redirect_warning:
        self.to_screen(redirect_warning)
    self.write_debug(f'Final URL: {url}')

    # YouTube sometimes provides a button to reload playlist with unavailable videos.
    if 'no-youtube-unavailable-videos' not in compat_opts:
        data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
    self._extract_and_report_alerts(data, only_once=True)

    # Dispatch on the shape of the data: tab page, watch-page playlist, or bare video
    tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
    if tabs:
        return self._extract_from_tabs(item_id, ytcfg, data, tabs)

    playlist = traverse_obj(
        data, ('contents', 'twoColumnWatchNextResults', 'playlist', 'playlist'), expected_type=dict)
    if playlist:
        return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)

    video_id = traverse_obj(
        data, ('currentVideoEndpoint', 'watchEndpoint', 'videoId'), expected_type=str) or video_id
    if video_id:
        if mobj['tab'] != '/live':  # live tab is expected to redirect to video
            self.report_warning(f'Unable to recognize playlist. Downloading just video {video_id}')
        return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
                               ie=YoutubeIE.ie_key(), video_id=video_id)

    raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
    """Extractor for bare YouTube playlist IDs and ?list= URLs.

    Thin wrapper: the matched playlist ID is normalized into a canonical
    https://www.youtube.com/playlist URL and handed off to YoutubeTabIE,
    which performs the actual extraction.
    """
    IE_DESC = 'YouTube playlists'
    # Matches either a bare playlist ID, or a youtube/youtubekids/invidious
    # URL whose query string carries a list= parameter.
    _VALID_URL = r'''(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        (?:
                            (?:
                                youtube(?:kids)?\.com|
                                %(invidious)s
                            )
                            /.*?\?.*?\blist=
                        )?
                        (?P<id>%(playlist_id)s)
                     )''' % {
        'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
        'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
    }
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': '[OLD]Team Fortress 2 (Class-based LP)',
            'id': 'PLBB231211A4F62143',
            'uploader': 'Wickman',
            'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
            'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
            'view_count': int,
            'uploader_url': 'https://www.youtube.com/user/Wickydoo',
            'modified_date': r're:\d{8}',
            'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
            'channel': 'Wickman',
            'tags': [],
            'channel_url': 'https://www.youtube.com/user/Wickydoo',
        },
        'playlist_mincount': 29,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
            'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        },
        'playlist_count': 2,
        'skip': 'This playlist is private',
    }, {
        'note': 'embedded',
        'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA15',
            'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
            'uploader': 'milan',
            'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
            'description': '',
            'channel_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
            'tags': [],
            'modified_date': '20140919',
            'view_count': int,
            'channel': 'milan',
            'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
            'uploader_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
        },
        'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
    }, {
        'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
        'playlist_mincount': 654,
        'info_dict': {
            'title': '2018 Chinese New Singles (11/6 updated)',
            'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
            'uploader': 'LBK',
            'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
            'description': 'md5:da521864744d60a198e3a88af4db0d9d',
            'channel': 'LBK',
            'view_count': int,
            'channel_url': 'https://www.youtube.com/c/愛低音的國王',
            'tags': [],
            'uploader_url': 'https://www.youtube.com/c/愛低音的國王',
            'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
            'modified_date': r're:\d{8}',
        },
        'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
    }, {
        'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
        'only_matching': True,
    }, {
        # music album playlist
        'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to YoutubeTabIE for anything it already handles, and to the
        # plain video extractor for watch URLs that carry a v= parameter.
        if YoutubeTabIE.suitable(url):
            return False
        from ..utils import parse_qs
        qs = parse_qs(url)
        if qs.get('v', [None])[0]:
            return False
        return super(YoutubePlaylistIE, cls).suitable(url)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
        # Rebuild a canonical /playlist URL, preserving the original query
        # parameters when present.
        url = update_url_query(
            'https://www.youtube.com/playlist',
            parse_qs(url) or {'list': playlist_id})
        if is_music_url:
            # Flag music.youtube.com origin for YoutubeTabIE.
            url = smuggle_url(url, {'is_music_url': True})
        return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
    """Extractor for youtu.be short links that also carry a ?list= playlist.

    Rebuilds the full /watch URL (handled downstream by YoutubeTabIE) so the
    playlist context is kept alongside the video id.
    """
    IE_DESC = 'youtu.be'
    _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    _TESTS = [{
        'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
        'info_dict': {
            'id': 'yeWKywCrFtk',
            'ext': 'mp4',
            'title': 'Small Scale Baler and Braiding Rugs',
            'uploader': 'Backus-Page House Museum',
            'uploader_id': 'backuspagemuseum',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
            'upload_date': '20161008',
            'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
            'categories': ['Nonprofits & Activism'],
            'tags': list,
            'like_count': int,
            'age_limit': 0,
            'playable_in_embed': True,
            'thumbnail': 'https://i.ytimg.com/vi_webp/yeWKywCrFtk/maxresdefault.webp',
            'channel': 'Backus-Page House Museum',
            'channel_id': 'UCEfMCQ9bs3tjvjy1s451zaw',
            'live_status': 'not_live',
            'view_count': int,
            'channel_url': 'https://www.youtube.com/channel/UCEfMCQ9bs3tjvjy1s451zaw',
            'availability': 'public',
            'duration': 59,
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
    }, {
        'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        playlist_id = mobj.group('playlist_id')
        # 'feature=youtu.be' mirrors what YouTube's own redirect appends.
        return self.url_result(
            update_url_query('https://www.youtube.com/watch', {
                'v': video_id,
                'list': playlist_id,
                'feature': 'youtu.be',
            }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeLivestreamEmbedIE(InfoExtractor):
    """Resolve /embed/live_stream?channel=... embeds to the channel's /live page."""
    IE_DESC = 'YouTube livestream embeds'
    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/embed/live_stream/?\?(?:[^#]+&)?channel=(?P<id>[^&#]+)'
    _TESTS = [{
        'url': 'https://www.youtube.com/embed/live_stream?channel=UC2_KI6RB__jGdlnK6dvFEZA',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Hand the canonical channel /live URL to the tab extractor.
        channel_id = self._match_id(url)
        live_url = f'https://www.youtube.com/channel/{channel_id}/live'
        return self.url_result(live_url, ie=YoutubeTabIE.ie_key(), video_id=channel_id)
class YoutubeYtUserIE(InfoExtractor):
    """Handle the "ytuser:<name>" shorthand by delegating to YoutubeTabIE."""
    IE_DESC = 'YouTube user videos; "ytuser:" prefix'
    IE_NAME = 'youtube:user'
    _VALID_URL = r'ytuser:(?P<id>.+)'
    _TESTS = [{
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)
        videos_url = 'https://www.youtube.com/user/%s/videos' % user_id
        return self.url_result(videos_url, ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    """":ytfav" shorthand for the logged-in user's liked-videos playlist (LL)."""
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
    _VALID_URL = r':ytfav(?:ou?rite)?s?'
    _LOGIN_REQUIRED = True
    _TESTS = [{
        'url': ':ytfav',
        'only_matching': True,
    }, {
        'url': ':ytfavorites',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # LL is YouTube's auto-generated "liked videos" playlist id.
        liked_url = 'https://www.youtube.com/playlist?list=LL'
        return self.url_result(liked_url, ie=YoutubeTabIE.ie_key())
class YoutubeSearchIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
    """"ytsearchN:<query>" prefix search (videos only, relevance order)."""
    IE_DESC = 'YouTube search'
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    # Protobuf-encoded search filter passed to the innertube API.
    _SEARCH_PARAMS = 'EgIQAQ%3D%3D'  # Videos only
    _TESTS = [{
        'url': 'ytsearch5:youtube-dl test video',
        'playlist_count': 5,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        }
    }]
class YoutubeSearchDateIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
    """"ytsearchdateN:<query>" prefix search, sorted by upload date."""
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube search, newest videos first'
    # Protobuf-encoded search filter passed to the innertube API.
    _SEARCH_PARAMS = 'CAISAhAB'  # Videos only, sorted by date
    _TESTS = [{
        'url': 'ytsearchdate5:youtube-dl test video',
        'playlist_count': 5,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        }
    }]
class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
    """Extract results from /results or /search URLs, honoring the sp filter."""
    IE_DESC = 'YouTube search URLs with sorting and filter support'
    IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:results|search)\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        }
    }, {
        'url': 'https://www.youtube.com/results?search_query=python&sp=EgIQAg%253D%253D',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'python',
            'title': 'python',
        }
    }, {
        'url': 'https://www.youtube.com/results?search_query=%23cats',
        'playlist_mincount': 1,
        'info_dict': {
            'id': '#cats',
            'title': '#cats',
            'entries': [{
                'url': r're:https://(www\.)?youtube\.com/hashtag/cats',
                'title': '#cats',
            }],
        },
    }, {
        'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        qs = parse_qs(url)
        # Either parameter may carry the query; _VALID_URL guarantees one exists.
        query = (qs.get('search_query') or qs.get('q'))[0]
        sp_filter = qs.get('sp', (None,))[0]
        return self.playlist_result(self._search_results(query, sp_filter), query, query)
class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
    """music.youtube.com search URLs; a #fragment may select a result section."""
    IE_DESC = 'YouTube music search URLs with selectable sections (Eg: #songs)'
    IE_NAME = 'youtube:music:search_url'
    _VALID_URL = r'https?://music\.youtube\.com/search\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
    _TESTS = [{
        'url': 'https://music.youtube.com/search?q=royalty+free+music',
        'playlist_count': 16,
        'info_dict': {
            'id': 'royalty free music',
            'title': 'royalty free music',
        }
    }, {
        'url': 'https://music.youtube.com/search?q=royalty+free+music&sp=EgWKAQIIAWoKEAoQAxAEEAkQBQ%3D%3D',
        'playlist_mincount': 30,
        'info_dict': {
            'id': 'royalty free music - songs',
            'title': 'royalty free music - songs',
        },
        'params': {'extract_flat': 'in_playlist'}
    }, {
        'url': 'https://music.youtube.com/search?q=royalty+free+music#community+playlists',
        'playlist_mincount': 30,
        'info_dict': {
            'id': 'royalty free music - community playlists',
            'title': 'royalty free music - community playlists',
        },
        'params': {'extract_flat': 'in_playlist'}
    }]
    # Maps a human-readable section name (usable as a URL fragment) to the
    # protobuf-encoded `sp` filter parameter YouTube Music expects.
    _SECTIONS = {
        'albums': 'EgWKAQIYAWoKEAoQAxAEEAkQBQ==',
        'artists': 'EgWKAQIgAWoKEAoQAxAEEAkQBQ==',
        'community playlists': 'EgeKAQQoAEABagoQChADEAQQCRAF',
        'featured playlists': 'EgeKAQQoADgBagwQAxAJEAQQDhAKEAU==',
        'songs': 'EgWKAQIIAWoKEAoQAxAEEAkQBQ==',
        'videos': 'EgWKAQIQAWoKEAoQAxAEEAkQBQ==',
    }

    def _real_extract(self, url):
        qs = parse_qs(url)
        query = (qs.get('search_query') or qs.get('q'))[0]
        params = qs.get('sp', (None,))[0]
        if params:
            # Reverse-map a known sp value to its section name for the title;
            # unknown values fall back to the raw sp string.
            section = next((k for k, v in self._SECTIONS.items() if v == params), params)
        else:
            # No sp given: derive the section from the URL fragment, if any.
            section = compat_urllib_parse_unquote_plus((url.split('#') + [''])[1]).lower()
            params = self._SECTIONS.get(section)
            if not params:
                section = None
        title = join_nonempty(query, section, delim=' - ')
        return self.playlist_result(self._search_results(query, params, default_client='web_music'), title, title)
class YoutubeFeedsInfoExtractor(InfoExtractor):
    """
    Base class for feed extractors
    Subclasses must define the _FEED_NAME property.
    """
    _LOGIN_REQUIRED = True
    _TESTS = []

    @property
    def IE_NAME(self):
        # Derived extractor name, e.g. 'youtube:recommended'.
        return 'youtube:%s' % self._FEED_NAME

    def _real_extract(self, url):
        # All feeds live under /feed/<name>; delegate to the tab extractor.
        return self.url_result(
            f'https://www.youtube.com/feed/{self._FEED_NAME}', ie=YoutubeTabIE.ie_key())
class YoutubeWatchLaterIE(InfoExtractor):
    """":ytwatchlater" shorthand for the logged-in Watch Later playlist (WL)."""
    IE_NAME = 'youtube:watchlater'
    IE_DESC = 'Youtube watch later list; ":ytwatchlater" keyword (requires cookies)'
    _VALID_URL = r':ytwatchlater'
    _TESTS = [{
        'url': ':ytwatchlater',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # WL is YouTube's auto-generated "watch later" playlist id.
        watch_later_url = 'https://www.youtube.com/playlist?list=WL'
        return self.url_result(watch_later_url, ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    """Recommended-videos feed; also matches the bare youtube.com front page."""
    IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    # Recommendations work anonymously too, so override the base requirement.
    _LOGIN_REQUIRED = False
    _TESTS = [{
        'url': ':ytrec',
        'only_matching': True,
    }, {
        'url': ':ytrecommended',
        'only_matching': True,
    }, {
        'url': 'https://youtube.com',
        'only_matching': True,
    }]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    """Subscriptions feed (":ytsubs"); login is required via the base class."""
    IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
    _VALID_URL = r':ytsub(?:scription)?s?'
    _FEED_NAME = 'subscriptions'
    _TESTS = [{
        'url': ':ytsubs',
        'only_matching': True,
    }, {
        'url': ':ytsubscriptions',
        'only_matching': True,
    }]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    """Watch-history feed (":ythis"); login is required via the base class."""
    IE_DESC = 'Youtube watch history; ":ythis" keyword (requires cookies)'
    _VALID_URL = r':ythis(?:tory)?'
    _FEED_NAME = 'history'
    _TESTS = [{
        'url': ':ythistory',
        'only_matching': True,
    }]
class YoutubeTruncatedURLIE(InfoExtractor):
    """Catches watch/attribution URLs that lost their video id (usually because
    an unquoted '&' cut the command line) and raises a helpful error."""
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    # Matches watch URLs that carry only auxiliary parameters (no v=), and
    # attribution links without a target.
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Always an error: a matching URL can never contain a video id.
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)
class YoutubeClipIE(InfoExtractor):
    """Stub for youtube.com/clip links: clips are unsupported, so the URL is
    passed through to the generic extractor and the whole video is fetched."""
    IE_NAME = 'youtube:clip'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'

    def _real_extract(self, url):
        # Server-side clip cutting is not implemented; warn and fall back.
        self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
        return self.url_result(url, 'Generic')
class YoutubeTruncatedIDIE(InfoExtractor):
    """Reject watch URLs whose video id is shorter than the required 11 chars."""
    IE_NAME = 'youtube:truncated_id'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        truncated_id = self._match_id(url)
        raise ExtractorError(
            'Incomplete YouTube ID %s. URL %s looks truncated.' % (truncated_id, url),
            expected=True)
|
#code from https://github.com/HanxunH/Active-Passive-Losses
import torch
import torch.nn.functional as F
import numpy as np
import mlconfig
mlconfig.register(torch.nn.CrossEntropyLoss)
# Pick the compute device once at import time; losses below read this global.
if torch.cuda.is_available():
    # Let cudnn autotune convolution kernels (helps when input shapes are
    # mostly static during training — standard for this code base).
    torch.backends.cudnn.benchmark = True
    # Pin to the first GPU explicitly when several are visible.
    device = torch.device('cuda:0' if torch.cuda.device_count() > 1 else 'cuda')
else:
    device = torch.device('cpu')
@mlconfig.register
class SCELoss(torch.nn.Module):
    """Symmetric Cross Entropy: alpha * CE + beta * RCE (Wang et al., 2019)."""

    def __init__(self, alpha, beta, num_classes=10):
        super(SCELoss, self).__init__()
        self.device = device
        self.alpha = alpha
        self.beta = beta
        self.num_classes = num_classes
        self.cross_entropy = torch.nn.CrossEntropyLoss()

    def forward(self, pred, labels):
        # Active term: standard cross entropy on the raw logits.
        ce_term = self.cross_entropy(pred, labels)
        # Passive term: reverse cross entropy on clamped probabilities; the
        # one-hot targets are clamped away from zero so log() stays finite.
        probs = torch.clamp(F.softmax(pred, dim=1), min=1e-7, max=1.0)
        one_hot = F.one_hot(labels, self.num_classes).float().to(self.device)
        one_hot = torch.clamp(one_hot, min=1e-4, max=1.0)
        reverse_ce = -torch.sum(probs * torch.log(one_hot), dim=1)
        return self.alpha * ce_term + self.beta * reverse_ce.mean()
@mlconfig.register
class ReverseCrossEntropy(torch.nn.Module):
    """Reverse Cross Entropy: -sum_k p(k|x) * log q(k|x) with clamped one-hot q."""

    def __init__(self, num_classes, scale=1.0):
        super(ReverseCrossEntropy, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.scale = scale

    def forward(self, pred, labels):
        probs = torch.clamp(F.softmax(pred, dim=1), min=1e-7, max=1.0)
        # Clamp the zeros of the one-hot target so log() stays finite.
        one_hot = F.one_hot(labels, self.num_classes).float().to(self.device)
        one_hot = torch.clamp(one_hot, min=1e-4, max=1.0)
        per_sample = -torch.sum(probs * torch.log(one_hot), dim=1)
        return self.scale * per_sample.mean()
@mlconfig.register
class NormalizedReverseCrossEntropy(torch.nn.Module):
    """Normalized variant of reverse cross entropy.

    NOTE(review): the normalizer is coded as (1/4) * (num_classes - 1); the
    intended constant may be 1 / (4 * (num_classes - 1)) (operator precedence).
    Kept exactly as in the reference implementation to preserve behavior —
    confirm against the Active-Passive-Losses paper before changing.
    """

    def __init__(self, num_classes, scale=1.0):
        super(NormalizedReverseCrossEntropy, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.scale = scale

    def forward(self, pred, labels):
        probs = torch.clamp(F.softmax(pred, dim=1), min=1e-7, max=1.0)
        one_hot = F.one_hot(labels, self.num_classes).float().to(self.device)
        one_hot = torch.clamp(one_hot, min=1e-4, max=1.0)
        norm_const = 1 / 4 * (self.num_classes - 1)
        per_sample = -torch.sum(probs * torch.log(one_hot), dim=1)
        return self.scale * norm_const * per_sample.mean()
@mlconfig.register
class NormalizedCrossEntropy(torch.nn.Module):
    """NCE: cross entropy divided by the summed CE over all classes."""

    def __init__(self, num_classes, scale=1.0):
        super(NormalizedCrossEntropy, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.scale = scale

    def forward(self, pred, labels):
        log_probs = F.log_softmax(pred, dim=1)
        one_hot = F.one_hot(labels, self.num_classes).float().to(self.device)
        numerator = -torch.sum(one_hot * log_probs, dim=1)
        denominator = -log_probs.sum(dim=1)
        return self.scale * (numerator / denominator).mean()
@mlconfig.register
class GeneralizedCrossEntropy(torch.nn.Module):
    """GCE: (1 - p_y^q) / q — interpolates between CE (q -> 0) and MAE (q = 1)."""

    def __init__(self, num_classes, q=0.7):
        super(GeneralizedCrossEntropy, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.q = q

    def forward(self, pred, labels):
        probs = torch.clamp(F.softmax(pred, dim=1), min=1e-7, max=1.0)
        one_hot = F.one_hot(labels, self.num_classes).float().to(self.device)
        # Probability assigned to the true class of each sample.
        p_true = torch.sum(one_hot * probs, dim=1)
        per_sample = (1. - torch.pow(p_true, self.q)) / self.q
        return per_sample.mean()
@mlconfig.register
class NormalizedGeneralizedCrossEntropy(torch.nn.Module):
    """NGCE: the GCE numerator normalized by its sum over all classes."""

    def __init__(self, num_classes, scale=1.0, q=0.7):
        super(NormalizedGeneralizedCrossEntropy, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.q = q
        self.scale = scale

    def forward(self, pred, labels):
        probs = torch.clamp(F.softmax(pred, dim=1), min=1e-7, max=1.0)
        one_hot = F.one_hot(labels, self.num_classes).float().to(self.device)
        numerators = 1. - torch.pow(torch.sum(one_hot * probs, dim=1), self.q)
        # Sum of the same quantity over all classes: K - sum_k p_k^q.
        denominators = self.num_classes - probs.pow(self.q).sum(dim=1)
        return self.scale * (numerators / denominators).mean()
@mlconfig.register
class MeanAbsoluteError(torch.nn.Module):
    """MAE loss in reduced form: 1 - p_y.

    The full L1 distance sum_k |p(k|x) - q(k|x)| equals 2 - 2*p(y|x) for a
    one-hot q, i.e. exactly twice the value computed here.
    """

    def __init__(self, num_classes, scale=1.0):
        super(MeanAbsoluteError, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.scale = scale

    def forward(self, pred, labels):
        probs = F.softmax(pred, dim=1)
        one_hot = F.one_hot(labels, self.num_classes).float().to(self.device)
        per_sample = 1. - torch.sum(one_hot * probs, dim=1)
        return self.scale * per_sample.mean()
@mlconfig.register
class NormalizedMeanAbsoluteError(torch.nn.Module):
    """Reduced MAE scaled by its maximum 1 / (2 * (num_classes - 1))."""

    def __init__(self, num_classes, scale=1.0):
        super(NormalizedMeanAbsoluteError, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.scale = scale

    def forward(self, pred, labels):
        probs = F.softmax(pred, dim=1)
        one_hot = F.one_hot(labels, self.num_classes).float().to(self.device)
        norm_const = 1 / (2 * (self.num_classes - 1))
        per_sample = 1. - torch.sum(one_hot * probs, dim=1)
        return self.scale * norm_const * per_sample.mean()
@mlconfig.register
class NCEandRCE(torch.nn.Module):
    """Active-Passive Loss pairing: NCE (active) + RCE (passive)."""

    def __init__(self, alpha, beta, num_classes):
        super(NCEandRCE, self).__init__()
        self.num_classes = num_classes
        self.nce = NormalizedCrossEntropy(num_classes=num_classes, scale=alpha)
        self.rce = ReverseCrossEntropy(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        active = self.nce(pred, labels)
        passive = self.rce(pred, labels)
        return active + passive
@mlconfig.register
class NCEandMAE(torch.nn.Module):
    """Active-Passive Loss pairing: NCE (active) + MAE (passive)."""

    def __init__(self, alpha, beta, num_classes):
        super(NCEandMAE, self).__init__()
        self.num_classes = num_classes
        self.nce = NormalizedCrossEntropy(num_classes=num_classes, scale=alpha)
        self.mae = MeanAbsoluteError(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        active = self.nce(pred, labels)
        passive = self.mae(pred, labels)
        return active + passive
@mlconfig.register
class GCEandMAE(torch.nn.Module):
    """Loss pairing: GCE + MAE.

    NOTE(review): `alpha` is accepted but unused (GCE takes no scale);
    kept only for a uniform constructor signature across pairings.
    """

    def __init__(self, alpha, beta, num_classes, q=0.7):
        super(GCEandMAE, self).__init__()
        self.num_classes = num_classes
        self.gce = GeneralizedCrossEntropy(num_classes=num_classes, q=q)
        self.mae = MeanAbsoluteError(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        first_term = self.gce(pred, labels)
        second_term = self.mae(pred, labels)
        return first_term + second_term
@mlconfig.register
class GCEandRCE(torch.nn.Module):
    """Loss pairing: GCE + RCE.

    NOTE(review): `alpha` is accepted but unused (GCE takes no scale);
    kept only for a uniform constructor signature across pairings.
    """

    def __init__(self, alpha, beta, num_classes, q=0.7):
        super(GCEandRCE, self).__init__()
        self.num_classes = num_classes
        self.gce = GeneralizedCrossEntropy(num_classes=num_classes, q=q)
        self.rce = ReverseCrossEntropy(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        first_term = self.gce(pred, labels)
        second_term = self.rce(pred, labels)
        return first_term + second_term
@mlconfig.register
class GCEandNCE(torch.nn.Module):
    """Loss pairing: GCE + NCE.

    NOTE(review): `alpha` and `beta` are accepted but unused here (GCE takes
    no scale and NCE is built with its default scale); kept only so the
    constructor signature matches the other pairings.
    """

    def __init__(self, alpha, beta, num_classes, q=0.7):
        super(GCEandNCE, self).__init__()
        self.num_classes = num_classes
        self.gce = GeneralizedCrossEntropy(num_classes=num_classes, q=q)
        self.nce = NormalizedCrossEntropy(num_classes=num_classes)

    def forward(self, pred, labels):
        first_term = self.gce(pred, labels)
        second_term = self.nce(pred, labels)
        return first_term + second_term
@mlconfig.register
class NGCEandNCE(torch.nn.Module):
    """Active-Passive Loss pairing: NGCE + NCE."""

    def __init__(self, alpha, beta, num_classes, q=0.7):
        super(NGCEandNCE, self).__init__()
        self.num_classes = num_classes
        self.ngce = NormalizedGeneralizedCrossEntropy(num_classes=num_classes, scale=alpha, q=q)
        self.nce = NormalizedCrossEntropy(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        first_term = self.ngce(pred, labels)
        second_term = self.nce(pred, labels)
        return first_term + second_term
@mlconfig.register
class NGCEandMAE(torch.nn.Module):
    """Active-Passive Loss pairing: NGCE (active) + MAE (passive)."""

    def __init__(self, alpha, beta, num_classes, q=0.7):
        super(NGCEandMAE, self).__init__()
        self.num_classes = num_classes
        self.ngce = NormalizedGeneralizedCrossEntropy(num_classes=num_classes, scale=alpha, q=q)
        self.mae = MeanAbsoluteError(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        active = self.ngce(pred, labels)
        passive = self.mae(pred, labels)
        return active + passive
@mlconfig.register
class NGCEandRCE(torch.nn.Module):
    """Active-Passive Loss pairing: NGCE (active) + RCE (passive)."""

    def __init__(self, alpha, beta, num_classes, q=0.7):
        super(NGCEandRCE, self).__init__()
        self.num_classes = num_classes
        self.ngce = NormalizedGeneralizedCrossEntropy(num_classes=num_classes, scale=alpha, q=q)
        self.rce = ReverseCrossEntropy(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        active = self.ngce(pred, labels)
        passive = self.rce(pred, labels)
        return active + passive
@mlconfig.register
class MAEandRCE(torch.nn.Module):
    """Loss pairing: MAE + RCE."""

    def __init__(self, alpha, beta, num_classes):
        super(MAEandRCE, self).__init__()
        self.num_classes = num_classes
        self.mae = MeanAbsoluteError(num_classes=num_classes, scale=alpha)
        self.rce = ReverseCrossEntropy(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        first_term = self.mae(pred, labels)
        second_term = self.rce(pred, labels)
        return first_term + second_term
@mlconfig.register
class NLNL(torch.nn.Module):
    """Negative Learning for Noisy Labels (NL/NLNL).

    Combines weighted positive cross entropy with negative learning: for every
    sample, `ln_neg` complementary labels are drawn at random and the model is
    pushed to assign them LOW probability via NLL on log(1 - softmax).
    """
    def __init__(self, train_loader, num_classes, ln_neg=1):
        super(NLNL, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.ln_neg = ln_neg
        # Per-class weights: inverse normalized class frequency computed from
        # the training set; uniform when the dataset exposes no `targets`.
        weight = torch.FloatTensor(num_classes).zero_() + 1.
        if not hasattr(train_loader.dataset, 'targets'):
            weight = [1] * num_classes
            weight = torch.FloatTensor(weight)
        else:
            for i in range(num_classes):
                weight[i] = (torch.from_numpy(np.array(train_loader.dataset.targets)) == i).sum()
            weight = 1 / (weight / weight.max())
        self.weight = weight.to(self.device)
        self.criterion = torch.nn.CrossEntropyLoss(weight=self.weight)
        self.criterion_nll = torch.nn.NLLLoss()
    def forward(self, pred, labels):
        # Draw ln_neg complementary labels per sample: adding a random offset
        # in [1, num_classes) modulo num_classes guarantees they differ from
        # the true label (verified by the asserts below).
        labels_neg = (labels.unsqueeze(-1).repeat(1, self.ln_neg)
                      + torch.LongTensor(len(labels), self.ln_neg).to(self.device).random_(1, self.num_classes)) % self.num_classes
        labels_neg = torch.autograd.Variable(labels_neg)
        assert labels_neg.max() <= self.num_classes-1
        assert labels_neg.min() >= 0
        assert (labels_neg != labels.unsqueeze(-1).repeat(1, self.ln_neg)).sum() == len(labels)*self.ln_neg
        # log(1 - p): NLL on this rewards low probability for negative labels.
        s_neg = torch.log(torch.clamp(1. - F.softmax(pred, 1), min=1e-5, max=1.))
        s_neg *= self.weight[labels].unsqueeze(-1).expand(s_neg.size()).to(self.device)
        # All positive labels are masked to -100 (CrossEntropyLoss's default
        # ignore_index), so the positive term is weighted by a zero count here.
        labels = labels * 0 - 100
        loss = self.criterion(pred, labels) * float((labels >= 0).sum())
        loss_neg = self.criterion_nll(s_neg.repeat(self.ln_neg, 1), labels_neg.t().contiguous().view(-1)) * float((labels_neg >= 0).sum())
        # Average the combined loss over the number of contributing labels.
        loss = ((loss+loss_neg) / (float((labels >= 0).sum())+float((labels_neg[:, 0] >= 0).sum())))
        return loss
@mlconfig.register
class FocalLoss(torch.nn.Module):
    '''
    Focal loss: -(1 - p_t)^gamma * log(p_t), with optional per-class
    alpha weighting.
    https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py
    '''
    def __init__(self, gamma=0, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        # alpha may be a scalar (stored as the binary pair [alpha, 1-alpha])
        # or a per-class list; either way it ends up as a tensor.
        self.alpha = alpha
        if isinstance(alpha, (float, int)):
            self.alpha = torch.Tensor([alpha, 1-alpha])
        if isinstance(alpha, list):
            self.alpha = torch.Tensor(alpha)
        self.size_average = size_average
    def forward(self, input, target):
        # Flatten any spatial dimensions so every position is a sample.
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)  # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))  # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)
        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1, target)  # log-probability of the target class
        logpt = logpt.view(-1)
        # p_t, detached from the graph via .data (legacy Variable idiom).
        pt = torch.autograd.Variable(logpt.data.exp())
        if self.alpha is not None:
            if self.alpha.type() != input.data.type():
                # NOTE: mutates self.alpha in place to match input dtype/device.
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0, target.data.view(-1))
            logpt = logpt * torch.autograd.Variable(at)
        loss = -1 * (1-pt)**self.gamma * logpt
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()
@mlconfig.register
class NormalizedFocalLoss(torch.nn.Module):
    """Focal loss divided per-sample by the focal term summed over all classes.

    NOTE(review): the `alpha` and `size_average`-unrelated arguments mirror
    FocalLoss, but `alpha` is accepted and never used here.
    """
    def __init__(self, scale=1.0, gamma=0, num_classes=10, alpha=None, size_average=True):
        super(NormalizedFocalLoss, self).__init__()
        self.gamma = gamma
        self.size_average = size_average
        self.num_classes = num_classes
        self.scale = scale
    def forward(self, input, target):
        target = target.view(-1, 1)
        logpt = F.log_softmax(input, dim=1)
        # Per-sample normalizer: focal term summed over every class.  Only the
        # (1 - p)^gamma factor is detached via .data; log p keeps its gradient.
        normalizor = torch.sum(-1 * (1 - logpt.data.exp()) ** self.gamma * logpt, dim=1)
        logpt = logpt.gather(1, target)  # log-probability of the target class
        logpt = logpt.view(-1)
        # p_t, detached from the graph (legacy Variable idiom).
        pt = torch.autograd.Variable(logpt.data.exp())
        loss = -1 * (1-pt)**self.gamma * logpt
        loss = self.scale * loss / normalizor
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()
@mlconfig.register
class NFLandNCE(torch.nn.Module):
    """Active-Passive Loss pairing: Normalized Focal Loss + NCE."""

    def __init__(self, alpha, beta, num_classes, gamma=0.5):
        super(NFLandNCE, self).__init__()
        self.num_classes = num_classes
        self.nfl = NormalizedFocalLoss(num_classes=num_classes, scale=alpha, gamma=gamma)
        self.nce = NormalizedCrossEntropy(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        focal_term = self.nfl(pred, labels)
        ce_term = self.nce(pred, labels)
        return focal_term + ce_term
@mlconfig.register
class NFLandMAE(torch.nn.Module):
    """Active-Passive Loss pairing: Normalized Focal Loss + MAE."""

    def __init__(self, alpha, beta, num_classes, gamma=0.5):
        super(NFLandMAE, self).__init__()
        self.num_classes = num_classes
        self.nfl = NormalizedFocalLoss(num_classes=num_classes, scale=alpha, gamma=gamma)
        self.mae = MeanAbsoluteError(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        focal_term = self.nfl(pred, labels)
        mae_term = self.mae(pred, labels)
        return focal_term + mae_term
@mlconfig.register
class NFLandRCE(torch.nn.Module):
    """Active-Passive Loss pairing: Normalized Focal Loss + RCE."""

    def __init__(self, alpha, beta, num_classes, gamma=0.5):
        super(NFLandRCE, self).__init__()
        self.num_classes = num_classes
        self.nfl = NormalizedFocalLoss(num_classes=num_classes, scale=alpha, gamma=gamma)
        self.rce = ReverseCrossEntropy(num_classes=num_classes, scale=beta)

    def forward(self, pred, labels):
        focal_term = self.nfl(pred, labels)
        rce_term = self.rce(pred, labels)
        return focal_term + rce_term
@mlconfig.register
class DMILoss(torch.nn.Module):
    """Determinant-based Mutual Information loss: -log|det(Q^T @ P)|.

    Q is the (N, K) one-hot label matrix and P the (N, K) softmax prediction
    matrix, so Q^T @ P is the (K, K) joint-count matrix the determinant is
    taken over.

    Fix over the original: the one-hot matrix is built directly on the
    predictions' device instead of being created on CPU and moved with a
    hard-coded ``.cuda()``, which crashed on CPU-only machines and pinned the
    loss to GPU 0 in multi-GPU setups.
    """

    def __init__(self, num_classes):
        super(DMILoss, self).__init__()
        self.num_classes = num_classes

    def forward(self, output, target):
        probs = F.softmax(output, dim=1)
        targets = target.reshape(target.size(0), 1)
        # Build the one-hot label matrix on the same device/dtype as the
        # predictions so the matmul below is always valid.
        y_onehot = torch.zeros(
            target.size(0), self.num_classes, device=output.device, dtype=probs.dtype)
        y_onehot.scatter_(1, targets, 1)
        mat = y_onehot.t() @ probs
        # The 0.001 offset keeps the log finite when the determinant collapses.
        return -1.0 * torch.log(torch.abs(torch.det(mat.float())) + 0.001)
|
import matplotlib.pyplot as plt
import numpy as np
import os
import get_dc_data
# Differential figure: daily increments of the cumulative DC COVID-19 counts.
casedata = get_dc_data.retrieve(download=False)

plt.figure(figsize=(6, 4))
plt.suptitle("COVID-19 Data Summary, District of Columbia ",
             fontweight="bold")
plt.title("github.com/reidac/covid19-curve-dc", style="oblique")
plt.xlabel("Days since March 8, 2020")
plt.ylabel("Increments")

# First differences of the cumulative series; one fewer point than the input.
# (Renamed from the original total_incs/pos_incs, where `pos_incs` actually
# held the death increments.)
inclen = len(casedata.positive) - 1
positive_incs = [casedata.positive[i + 1] - casedata.positive[i] for i in range(inclen)]
death_incs = [casedata.deaths[i + 1] - casedata.deaths[i] for i in range(inclen)]

# Full-width overlapping bars: deaths (red) are drawn on top of positives (blue).
plt.bar(casedata.x[1:], positive_incs, color='b', width=1.0)
plt.bar(casedata.x[1:], death_incs, color='r', width=1.0)
plt.legend(labels=['Positives', 'Deaths'])

# FIG_PATH lets CI redirect the output; default to the working directory.
fig_path = os.environ.get("FIG_PATH", ".")
plt.savefig("{0}/us_dc_bars.png".format(fig_path), dpi=300, bbox_inches="tight")
print("Bar graph of case and death increments vs. date for the District of Columbia.")
|
import pygame
from States.Baseclass import Base
from Functions.textfunctions import *
from GameConstants.constants import *
from GameConstants.variables import *
from Classes.buttons import Button
# Shared end-screen buttons, created once at module level so render() and
# update() operate on the same instances every frame.
startbtn = Button(x = WINDOW_WIDTH // 2 - 200, y = WINDOW_HEIGHT // 2 + 200, text="Play Again", color=GREEN, color2 = DARKGREEN)
settingsbtn = Button(x = WINDOW_WIDTH // 2 - 50, y = WINDOW_HEIGHT // 2 + 200, text="Settings", color=BLUE, color2 = DARKBLUE)
endbtn = Button(x = WINDOW_WIDTH // 2 + 100, y = WINDOW_HEIGHT // 2 + 200, text="Quit", color=RED, color2 = DARKRED)
class Over(Base):
    """Game-over screen state: shows the final message plus the
    Play Again / Settings / Quit buttons.

    Cleanup over the original: the `global` declarations were removed (the
    module-level buttons and state object are only read or mutated in place,
    never rebound, and SCORE/HIGH_SCORE were declared but never used), and the
    site-injected `quit()` builtin was replaced by an explicit SystemExit.
    """

    def __init__(self):
        super().__init__()

    def render(self):
        """Draw the game-over text and the three buttons."""
        print_text("Game Over!", RED, WINDOW_WIDTH // 2, WINDOW_HEIGHT // 2, 72)
        startbtn.render()
        settingsbtn.render()
        endbtn.render()

    def update(self, params):
        """Advance the screen one frame; `params` is part of the state-machine
        interface (see Base) and is unused here."""
        startbtn.update()
        settingsbtn.update()
        endbtn.update()
        if startbtn.clicked():
            GAME_STATE_VARIABLES.change("play")
        if settingsbtn.clicked():
            GAME_STATE_VARIABLES.change("settings")
        if endbtn.clicked():
            pygame.quit()
            # `quit()` is only guaranteed when the `site` module is loaded;
            # raising SystemExit is the portable equivalent.
            raise SystemExit
        self.render()
|
'''
Created by auto_sdk on 2016.09.13
'''
from top.api.base import RestApi
class ItemUpdateRequest(RestApi):
def __init__(self, domain='gw.api.taobao.com', port=80):
RestApi.__init__(self, domain, port)
self.after_sale_id = None
self.approve_status = None
self.auction_point = None
self.auto_fill = None
self.auto_repost = None
self.barcode = None
self.change_prop = None
self.chaoshi_extends_info = None
self.cid = None
self.cod_postage_id = None
self.cpv_memo = None
self.delivery_time_delivery_time = None
self.delivery_time_delivery_time_type = None
self.delivery_time_need_delivery_time = None
self.desc = None
self.desc_modules = None
self.empty_fields = None
self.ems_fee = None
self.express_fee = None
self.features = None
self.food_security_contact = None
self.food_security_design_code = None
self.food_security_factory = None
self.food_security_factory_site = None
self.food_security_food_additive = None
self.food_security_health_product_no = None
self.food_security_mix = None
self.food_security_period = None
self.food_security_plan_storage = None
self.food_security_prd_license_no = None
self.food_security_product_date_end = None
self.food_security_product_date_start = None
self.food_security_stock_date_end = None
self.food_security_stock_date_start = None
self.food_security_supplier = None
self.freight_payer = None
self.global_stock_country = None
self.global_stock_delivery_place = None
self.global_stock_tax_free_promise = None
self.global_stock_type = None
self.has_discount = None
self.has_invoice = None
self.has_showcase = None
self.has_warranty = None
self.ignorewarning = None
self.image = None
self.increment = None
self.input_custom_cpv = None
self.input_pids = None
self.input_str = None
self.is_3D = None
self.is_ex = None
self.is_lightning_consignment = None
self.is_offline = None
self.is_replace_sku = None
self.is_taobao = None
self.is_xinpin = None
self.item_size = None
self.item_weight = None
self.lang = None
self.list_time = None
self.locality_life_choose_logis = None
self.locality_life_eticket = None
self.locality_life_expirydate = None
self.locality_life_merchant = None
self.locality_life_network_id = None
self.locality_life_obs = None
self.locality_life_onsale_auto_refund_ratio = None
self.locality_life_packageid = None
self.locality_life_refund_ratio = None
self.locality_life_refundmafee = None
self.locality_life_verification = None
self.locality_life_version = None
self.location_city = None
self.location_state = None
self.ms_payment_price = None
self.ms_payment_reference_price = None
self.ms_payment_voucher_price = None
self.newprepay = None
self.num = None
self.num_iid = None
self.o2o_bind_service = None
self.outer_id = None
self.paimai_info_deposit = None
self.paimai_info_interval = None
self.paimai_info_mode = None
self.paimai_info_reserve = None
self.paimai_info_valid_hour = None
self.paimai_info_valid_minute = None
self.pic_path = None
self.post_fee = None
self.postage_id = None
self.price = None
self.product_id = None
self.property_alias = None
self.props = None
self.qualification = None
self.scenic_ticket_book_cost = None
self.scenic_ticket_pay_way = None
self.sell_point = None
self.sell_promise = None
self.seller_cids = None
self.shape = None
self.sku_barcode = None
self.sku_delivery_times = None
self.sku_hd_height = None
self.sku_hd_lamp_quantity = None
self.sku_hd_length = None
self.sku_outer_ids = None
self.sku_prices = None
self.sku_properties = None
self.sku_quantities = None
self.sku_spec_ids = None
self.spu_confirm = None
self.stuff_status = None
self.sub_stock = None
self.title = None
self.valid_thru = None
self.weight = None
self.wireless_desc = None
    def getapiname(self):
        """Return the Taobao Open Platform API method name for this request."""
        return 'taobao.item.update'
    def getMultipartParas(self):
        """Return the names of parameters that are uploaded as multipart data."""
        return ['image']
    def getTranslateParas(self):
        """Map flattened attribute names to the dotted parameter names the
        gateway expects (e.g. ``locality_life_obs`` -> ``locality_life.obs``).
        """
        return {'food_security_stock_date_start': 'food_security.stock_date_start',
                'locality_life_choose_logis': 'locality_life.choose_logis',
                'food_security_plan_storage': 'food_security.plan_storage',
                'delivery_time_need_delivery_time': 'delivery_time.need_delivery_time',
                'ms_payment_reference_price': 'ms_payment.reference_price',
                'paimai_info_reserve': 'paimai_info.reserve', 'food_security_factory': 'food_security.factory',
                'food_security_product_date_end': 'food_security.product_date_end', 'location_state': 'location.state',
                'food_security_contact': 'food_security.contact',
                'locality_life_onsale_auto_refund_ratio': 'locality_life.onsale_auto_refund_ratio',
                'ms_payment_price': 'ms_payment.price', 'locality_life_refundmafee': 'locality_life.refundmafee',
                'food_security_period': 'food_security.period',
                'food_security_stock_date_end': 'food_security.stock_date_end',
                'paimai_info_valid_minute': 'paimai_info.valid_minute',
                'locality_life_merchant': 'locality_life.merchant',
                'food_security_health_product_no': 'food_security.health_product_no',
                'paimai_info_mode': 'paimai_info.mode', 'locality_life_obs': 'locality_life.obs',
                'ms_payment_voucher_price': 'ms_payment.voucher_price', 'food_security_mix': 'food_security.mix',
                'locality_life_packageid': 'locality_life.packageid',
                'food_security_prd_license_no': 'food_security.prd_license_no',
                'paimai_info_valid_hour': 'paimai_info.valid_hour', 'paimai_info_interval': 'paimai_info.interval',
                'location_city': 'location.city', 'food_security_design_code': 'food_security.design_code',
                'paimai_info_deposit': 'paimai_info.deposit',
                'delivery_time_delivery_time_type': 'delivery_time.delivery_time_type',
                'locality_life_eticket': 'locality_life.eticket',
                'food_security_food_additive': 'food_security.food_additive',
                'locality_life_network_id': 'locality_life.network_id',
                'food_security_supplier': 'food_security.supplier',
                'food_security_factory_site': 'food_security.factory_site',
                'locality_life_version': 'locality_life.version',
                'food_security_product_date_start': 'food_security.product_date_start',
                'locality_life_expirydate': 'locality_life.expirydate',
                'locality_life_verification': 'locality_life.verification',
                'locality_life_refund_ratio': 'locality_life.refund_ratio',
                'delivery_time_delivery_time': 'delivery_time.delivery_time'}
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
    """Compound-property validator for ``carpet.aaxis.title``."""

    def __init__(self, plotly_name="title", parent_name="carpet.aaxis", **kwargs):
        # Resolve the caller-overridable pieces first so the super() call
        # stays flat; defaults are the generated values for this property.
        data_class_str = kwargs.pop("data_class_str", "Title")
        data_docs = kwargs.pop(
            "data_docs",
            """
            font
                Sets this axis' title font. Note that the
                title's font used to be set by the now
                deprecated `titlefont` attribute.
            offset
                An additional amount by which to offset the
                title from the tick labels, given in pixels.
                Note that this used to be set by the now
                deprecated `titleoffset` attribute.
            text
                Sets the title of this axis. Note that before
                the existence of `title.text`, the title's
                contents used to be defined as the `title`
                attribute itself. This behavior has been
                deprecated.
""",
        )
        super(TitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
import py
class ns(py.xml.Namespace):
    # Test namespace: attribute access (ns.hello) creates tag classes whose
    # instances serialize to XML elements of the same name.
    pass
def test_tag_with_text():
    # A tag holding plain text renders as an open/close pair around it.
    tag = ns.hello("world")
    rendered = unicode(tag)
    assert rendered == "<hello>world</hello>"
def test_class_identity():
    # Attribute lookup on the namespace is cached: the same class object is
    # returned on every access.
    first = ns.hello
    second = ns.hello
    assert first is second
def test_tag_with_text_and_attributes():
    # Keyword arguments become XML attributes, readable back via tag.attr.
    tag = ns.some(name="hello", value="world")
    assert tag.attr.name == 'hello'
    assert tag.attr.value == 'world'
    assert unicode(tag) == '<some name="hello" value="world"/>'
def test_tag_with_subclassed_attr_simple():
    # Subclassing a tag plus its nested Attr class supplies default attribute
    # values; the serialized tag name comes from the subclass name ("my"),
    # so the local class identifiers here are semantically significant.
    class my(ns.hello):
        class Attr(ns.hello.Attr):
            hello="world"
    x = my()
    assert x.attr.hello == 'world'
    assert unicode(x) == '<my hello="world"/>'
def test_tag_nested():
    # Serialization assigns .parent links on children ("parentifying"),
    # so the first unicode() call is needed before inspecting .parent.
    root = ns.hello(ns.world())
    unicode(root)
    assert root[0].parent is root
    rendered = unicode(root)
    assert rendered == '<hello><world/></hello>'
def test_tag_xmlname():
    # An explicit xmlname attribute overrides the class name in the output.
    class my(ns.hello):
        xmlname = 'world'
    rendered = unicode(my())
    assert rendered == '<world/>'
def test_tag_with_text_entity():
    """Ampersands in text content must be escaped as ``&amp;`` entities."""
    x = ns.hello('world & rest')
    u = unicode(x)
    # The serializer escapes the raw "&" in text content; the previous
    # expectation compared against the unescaped text (the entity was
    # evidently lost in transit) and could never match escaped output.
    assert u == "<hello>world &amp; rest</hello>"
def test_tag_with_text_and_attributes_entity():
    """Ampersands in attribute values are escaped on serialization.

    The in-memory attribute keeps the raw value; only the serialized form
    carries the ``&amp;`` entity (the previous expected string had lost its
    escaping).
    """
    x = ns.some(name="hello & world")
    assert x.attr.name == "hello & world"
    u = unicode(x)
    assert u == '<some name="hello &amp; world"/>'
def test_raw():
    # py.xml.raw wraps markup that must pass through without escaping.
    tag = ns.some(py.xml.raw("<p>literal</p>"))
    rendered = unicode(tag)
    assert rendered == "<some><p>literal</p></some>"
|
import scipy.sparse as sp
import tensorflow as tf
from .convert import sparse_to_tensor
class SparseTest(tf.test.TestCase):
    """Tests for converting scipy sparse matrices to TF SparseTensors.

    Uses deprecated TF1 graph/session APIs (test_session, Tensor.eval), so
    statement order relative to the active session matters; code unchanged.
    """

    def test_sparse_to_tensor(self):
        # Round trip: scipy COO -> SparseTensor -> dense must equal the
        # original dense matrix.
        value = [[0, 1, 0], [1, 0, 2], [0, 2, 0]]
        value = sp.coo_matrix(value)
        with self.test_session():
            self.assertAllEqual(
                tf.sparse_tensor_to_dense(sparse_to_tensor(value)).eval(),
                value.toarray())

    def test_sparse_feed_dict(self):
        # Feeding the converted value through a sparse placeholder.
        # NOTE(review): sparse_to_tensor appears to return something feedable
        # as a sparse_placeholder value -- confirm against .convert's impl.
        value = [[0, 1, 0], [1, 0, 2], [0, 2, 0]]
        value = sp.coo_matrix(value)
        value = sparse_to_tensor(value)
        # Sparse placeholder is buggy and can't convert shape.
        # => Need to pass empty shape.
        placeholder = tf.sparse_placeholder(tf.float32)
        output = tf.sparse_tensor_to_dense(placeholder)
        expected = [[0, 1, 0], [1, 0, 2], [0, 2, 0]]
        with self.test_session() as sess:
            result = sess.run(output, feed_dict={placeholder: value})
            self.assertAllEqual(result, expected)
|
from Midi import Midi
import os
import matplotlib.pyplot as plt
def delete_repetition(note_list):
    """Collapse runs of consecutive notes that share the same pitch.

    Notes are compared by their first element (the pitch); only the first
    note of each run is kept.  Returns a new list; the input is unchanged.
    An empty input returns an empty list (the original implementation raised
    IndexError on ``note_list[0]``).
    """
    if not note_list:
        return []
    deduped = [note_list[0]]
    for prev, cur in zip(note_list, note_list[1:]):
        if prev[0] != cur[0]:
            deduped.append(cur)
    return deduped
def get_pattern(file_out):
    """Tally pattern frequencies over every MIDI file in the 'Children' corpus.

    The counted pattern is whichever expression is uncommented in the loop
    below (currently the (previous, current) note-length pair, scaled by 4).
    Results go to results/Children/<file_out> as "<v1> <v2> <count>" lines,
    sorted by descending count; the sorted (pattern, count) list is returned.

    NOTE(review): assumes Midi.output_as_note_list yields tuples with pitch
    at index 0 and duration at index 2 -- confirm against the Midi class.
    """
    direction = 'Children'
    files = os.listdir(direction)
    pattern_dict = {}
    for file in files:
        path = direction + '/' + file
        if os.path.isfile(path):
            midi = Midi(path)
            note_list = midi.output_as_note_list(normalize=True, clean=True)
            # Drop consecutive repeats of the same pitch before counting.
            note_list = delete_repetition(note_list)
            for i in range(1, len(note_list)):
                # Define your pattern here, remember to modify the iterator's range.
                # pattern = note_list[i][0] - note_list[i - 1][0]
                # pattern = note_list[i][0] + note_list[i - 2][0] - note_list[i - 1][0] * 2
                # pattern = (note_list[i - 1][0] - note_list[i - 2][0], note_list[i][0] - note_list[i - 1][0])
                pattern = (note_list[i - 1][2] * 4, note_list[i][2] * 4)
                if pattern_dict.get(pattern) is not None:
                    pattern_dict[pattern] += 1
                else:
                    pattern_dict[pattern] = 1
    pattern_list = list(pattern_dict.items())
    # Sort by frequency, most common first.
    pattern_list.sort(key=lambda item: -item[1])
    file_out = 'results/' + direction + '/' + file_out
    f = open(file_out, 'w')
    for i in range(len(pattern_list)):
        f.write("%d %d %d\n" % (pattern_list[i][0][0], pattern_list[i][0][1], pattern_list[i][1]))
        # f.write("%d %d\n" % (pattern_list[i][0], pattern_list[i][1]))
    f.close()
    return pattern_list
def plot(xlabel, ylabel, file_in, file_out, num = None):
    """Bar-plot the first ``num`` pattern counts written by get_pattern().

    Reads "<v1> <v2> <count>" lines from results/Children/<file_in>, uses
    (v1, v2) tuples as x tick labels and counts as bar heights, saves the
    figure to results/Children/<file_out> and shows it interactively.
    """
    direction = 'Children'
    file_in = 'results/' + direction + '/' + file_in
    f = open(file_in, 'r')
    x = []
    y = []
    label = []
    i = 0
    for line in f.readlines():
        x.append(i)
        # v1, v2 = tuple(map(int, line.split()))
        v1, v2, v3 = tuple(map(int, line.split()))
        # y.append(v2)
        # label.append(v1)
        y.append(v3)
        label.append((v1, v2))
        i += 1
    f.close()
    # Keep only the first `num` entries (all of them when num is None).
    x = x[:num]
    y = y[:num]
    label = label[:num]
    plt.figure(figsize=(20, 4))
    plt.title('Frequency of Length Tuple in %s Set' % direction)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.xticks(x, label)
    plt.bar(x, y, 0.5, align='center')
    file_out = 'results/' + direction + '/' + file_out
    plt.savefig(file_out)
    plt.show()
# Script entry: tally (previous, current) note-length pairs over the corpus,
# dump them to results/Children/, then plot the 32 most frequent pairs.
# The commented lines below are alternative pattern runs kept for reference.
get_pattern('length_tuple.txt')
plot('Length Tuple', 'Frequency', 'length_tuple.txt', 'length_tuple.jpg', 32)
# get_pattern('difference_tuple.txt')
# plot('Difference Tuple', 'Frequency', 'difference_tuple.txt', 'difference_tuple.jpg', 32)
# get_pattern('2nddiff-freq.txt')
# plot('Second Difference', 'Frequency', '2nddiff-freq.txt', '2nddiff-freq.jpg', 32)
# get_pattern('2nddiff-freq(without repetition).txt')
# plot('Second Difference', 'Frequency', '2nddiff-freq(without repetition).txt', '2nddiff-freq(without repetition).jpg', 32)
# get_pattern('diff-freq(without repetition).txt')
# plot('Difference', 'Frequency', 'diff-freq(without repetition).txt', 'diff-freq(without repetition).jpg', 32)
# get_pattern('diff-freq.txt')
# plot('Difference', 'Frequency', 'diff-freq.txt', 'diff-freq.jpg', 32)
|
# xloverlay/__init__.py
""" Overlay library for python XLattice packages. """
# Package metadata, exported via __all__ for setup.py / version checks.
__version__ = '0.0.8'
__version_date__ = '2018-03-08'

__all__ = ['__version__', '__version_date__', 'XLOverlayError', ]


class XLOverlayError(RuntimeError):
    """ General purpose exception for the package. """
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.examples.speech_commands import input_data
from tensorflow.examples.speech_commands import models
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class InputDataTest(test.TestCase):
  """Unit tests for the speech_commands input pipeline (AudioProcessor).

  Only change from the original: the deprecated ``assertEquals`` alias
  (removed in Python 3.12) is replaced by ``assertEqual``.
  """

  def _getWavData(self):
    """Return encoded WAV bytes for two seconds of stereo silence."""
    with self.cached_session() as sess:
      sample_data = tf.zeros([32000, 2])
      wav_encoder = contrib_audio.encode_wav(sample_data, 16000)
      wav_data = self.evaluate(wav_encoder)
    return wav_data

  def _saveTestWavFile(self, filename, wav_data):
    """Write raw WAV bytes to `filename`."""
    with open(filename, "wb") as f:
      f.write(wav_data)

  def _saveWavFolders(self, root_dir, labels, how_many):
    """Create one folder per label, each holding `how_many` identical wavs."""
    wav_data = self._getWavData()
    for label in labels:
      dir_name = os.path.join(root_dir, label)
      os.mkdir(dir_name)
      for i in range(how_many):
        file_path = os.path.join(dir_name, "some_audio_%d.wav" % i)
        self._saveTestWavFile(file_path, wav_data)

  def _model_settings(self):
    """Return a minimal model-settings dict accepted by AudioProcessor."""
    return {
        "desired_samples": 160,
        "fingerprint_size": 40,
        "label_count": 4,
        "window_size_samples": 100,
        "window_stride_samples": 100,
        "fingerprint_width": 40,
        "preprocess": "mfcc",
    }

  def _runGetDataTest(self, preprocess, window_length_ms):
    """End-to-end get_data() smoke test for one preprocessing mode."""
    tmp_dir = self.get_temp_dir()
    wav_dir = os.path.join(tmp_dir, "wavs")
    os.mkdir(wav_dir)
    self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
    # The "_background_noise_" folder is picked up automatically.
    background_dir = os.path.join(wav_dir, "_background_noise_")
    os.mkdir(background_dir)
    wav_data = self._getWavData()
    for i in range(10):
      file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
      self._saveTestWavFile(file_path, wav_data)
    model_settings = models.prepare_model_settings(
        4, 16000, 1000, window_length_ms, 20, 40, preprocess)
    with self.cached_session() as sess:
      audio_processor = input_data.AudioProcessor(
          "", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir)
      result_data, result_labels = audio_processor.get_data(
          10, 0, model_settings, 0.3, 0.1, 100, "training", sess)
      self.assertEqual(10, len(result_data))
      self.assertEqual(10, len(result_labels))

  def testPrepareWordsList(self):
    # prepare_words_list prepends the silence/unknown labels.
    words_list = ["a", "b"]
    self.assertGreater(
        len(input_data.prepare_words_list(words_list)), len(words_list))

  def testWhichSet(self):
    # Partition assignment is deterministic per file name, and files that
    # differ only in their "_nohash_" suffix land in the same partition.
    self.assertEqual(
        input_data.which_set("foo.wav", 10, 10),
        input_data.which_set("foo.wav", 10, 10))
    self.assertEqual(
        input_data.which_set("foo_nohash_0.wav", 10, 10),
        input_data.which_set("foo_nohash_1.wav", 10, 10))

  @test_util.run_deprecated_v1
  def testPrepareDataIndex(self):
    tmp_dir = self.get_temp_dir()
    self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
    audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
                                                ["a", "b"], 10, 10,
                                                self._model_settings(), tmp_dir)
    self.assertLess(0, audio_processor.set_size("training"))
    self.assertTrue("training" in audio_processor.data_index)
    self.assertTrue("validation" in audio_processor.data_index)
    self.assertTrue("testing" in audio_processor.data_index)
    # "c" is not a wanted word, so it maps to the unknown-word index.
    self.assertEqual(input_data.UNKNOWN_WORD_INDEX,
                     audio_processor.word_to_index["c"])

  def testPrepareDataIndexEmpty(self):
    tmp_dir = self.get_temp_dir()
    self._saveWavFolders(tmp_dir, ["a", "b", "c"], 0)
    with self.assertRaises(Exception) as e:
      _ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10,
                                    self._model_settings(), tmp_dir)
    self.assertTrue("No .wavs found" in str(e.exception))

  def testPrepareDataIndexMissing(self):
    tmp_dir = self.get_temp_dir()
    self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
    with self.assertRaises(Exception) as e:
      _ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b", "d"], 10,
                                    10, self._model_settings(), tmp_dir)
    self.assertTrue("Expected to find" in str(e.exception))

  @test_util.run_deprecated_v1
  def testPrepareBackgroundData(self):
    tmp_dir = self.get_temp_dir()
    background_dir = os.path.join(tmp_dir, "_background_noise_")
    os.mkdir(background_dir)
    wav_data = self._getWavData()
    for i in range(10):
      file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
      self._saveTestWavFile(file_path, wav_data)
    self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
    audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
                                                ["a", "b"], 10, 10,
                                                self._model_settings(), tmp_dir)
    self.assertEqual(10, len(audio_processor.background_data))

  def testLoadWavFile(self):
    tmp_dir = self.get_temp_dir()
    file_path = os.path.join(tmp_dir, "load_test.wav")
    wav_data = self._getWavData()
    self._saveTestWavFile(file_path, wav_data)
    sample_data = input_data.load_wav_file(file_path)
    self.assertIsNotNone(sample_data)

  def testSaveWavFile(self):
    tmp_dir = self.get_temp_dir()
    file_path = os.path.join(tmp_dir, "load_test.wav")
    save_data = np.zeros([16000, 1])
    input_data.save_wav_file(file_path, save_data, 16000)
    loaded_data = input_data.load_wav_file(file_path)
    self.assertIsNotNone(loaded_data)
    self.assertEqual(16000, len(loaded_data))

  @test_util.run_deprecated_v1
  def testPrepareProcessingGraph(self):
    tmp_dir = self.get_temp_dir()
    wav_dir = os.path.join(tmp_dir, "wavs")
    os.mkdir(wav_dir)
    self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
    background_dir = os.path.join(wav_dir, "_background_noise_")
    os.mkdir(background_dir)
    wav_data = self._getWavData()
    for i in range(10):
      file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
      self._saveTestWavFile(file_path, wav_data)
    model_settings = {
        "desired_samples": 160,
        "fingerprint_size": 40,
        "label_count": 4,
        "window_size_samples": 100,
        "window_stride_samples": 100,
        "fingerprint_width": 40,
        "preprocess": "mfcc",
    }
    audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
                                                10, 10, model_settings, tmp_dir)
    # The processing graph exposes all of its feed placeholders and output.
    self.assertIsNotNone(audio_processor.wav_filename_placeholder_)
    self.assertIsNotNone(audio_processor.foreground_volume_placeholder_)
    self.assertIsNotNone(audio_processor.time_shift_padding_placeholder_)
    self.assertIsNotNone(audio_processor.time_shift_offset_placeholder_)
    self.assertIsNotNone(audio_processor.background_data_placeholder_)
    self.assertIsNotNone(audio_processor.background_volume_placeholder_)
    self.assertIsNotNone(audio_processor.output_)

  @test_util.run_deprecated_v1
  def testGetDataAverage(self):
    self._runGetDataTest("average", 10)

  @test_util.run_deprecated_v1
  def testGetDataAverageLongWindow(self):
    self._runGetDataTest("average", 30)

  @test_util.run_deprecated_v1
  def testGetDataMfcc(self):
    self._runGetDataTest("mfcc", 30)

  @test_util.run_deprecated_v1
  def testGetDataMicro(self):
    self._runGetDataTest("micro", 20)

  @test_util.run_deprecated_v1
  def testGetUnprocessedData(self):
    tmp_dir = self.get_temp_dir()
    wav_dir = os.path.join(tmp_dir, "wavs")
    os.mkdir(wav_dir)
    self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
    model_settings = {
        "desired_samples": 160,
        "fingerprint_size": 40,
        "label_count": 4,
        "window_size_samples": 100,
        "window_stride_samples": 100,
        "fingerprint_width": 40,
        "preprocess": "mfcc",
    }
    audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
                                                10, 10, model_settings, tmp_dir)
    result_data, result_labels = audio_processor.get_unprocessed_data(
        10, model_settings, "training")
    self.assertEqual(10, len(result_data))
    self.assertEqual(10, len(result_labels))

  @test_util.run_deprecated_v1
  def testGetFeaturesForWav(self):
    tmp_dir = self.get_temp_dir()
    wav_dir = os.path.join(tmp_dir, "wavs")
    os.mkdir(wav_dir)
    self._saveWavFolders(wav_dir, ["a", "b", "c"], 1)
    desired_samples = 1600
    model_settings = {
        "desired_samples": desired_samples,
        "fingerprint_size": 40,
        "label_count": 4,
        "window_size_samples": 100,
        "window_stride_samples": 100,
        "fingerprint_width": 40,
        "average_window_width": 6,
        "preprocess": "average",
    }
    with self.cached_session() as sess:
      audio_processor = input_data.AudioProcessor(
          "", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir)
      # Generate a square-ish test wave: 0, -1, 0, 1 repeating.
      sample_data = np.zeros([desired_samples, 1])
      for i in range(desired_samples):
        phase = i % 4
        if phase == 0:
          sample_data[i, 0] = 0
        elif phase == 1:
          sample_data[i, 0] = -1
        elif phase == 2:
          sample_data[i, 0] = 0
        elif phase == 3:
          sample_data[i, 0] = 1
      test_wav_path = os.path.join(tmp_dir, "test_wav.wav")
      input_data.save_wav_file(test_wav_path, sample_data, 16000)
      results = audio_processor.get_features_for_wav(test_wav_path,
                                                     model_settings, sess)
      spectrogram = results[0]
      self.assertEqual(1, spectrogram.shape[0])
      self.assertEqual(16, spectrogram.shape[1])
      self.assertEqual(11, spectrogram.shape[2])
      self.assertNear(0, spectrogram[0, 0, 0], 0.1)
      self.assertNear(200, spectrogram[0, 0, 5], 0.1)

  def testGetFeaturesRange(self):
    model_settings = {
        "preprocess": "average",
    }
    features_min, _ = input_data.get_features_range(model_settings)
    self.assertNear(0.0, features_min, 1e-5)

  def testGetMfccFeaturesRange(self):
    model_settings = {
        "preprocess": "mfcc",
    }
    features_min, features_max = input_data.get_features_range(model_settings)
    self.assertLess(features_min, features_max)


if __name__ == "__main__":
  test.main()
|
import os
# Builds a nested "tar bomb" fixture: each level packs ten copies of the
# previous archive, so the extracted size grows ~10x per iteration.
# NOTE(review): relies on shell globbing (z*) and POSIX cp/rm/tar, so the
# os.system calls are shell-dependent; rm deletes z* files in the cwd.
os.chdir('zip_bomb')  # assumes ./zip_bomb exists and contains a file "data"
for i in range(10):
    os.system('cp data z{}'.format(i))
os.system('tar -cjf bomb.tar.bz z*')
os.system('rm z*')
for i in range(10):
    for j in range(10):
        os.system('cp bomb.tar.bz z{}.tar.bz'.format(j))
    os.system('rm bomb.tar.bz')
    os.system('tar -cjf bomb.tar.bz z*')
    os.system('rm z*')
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates and runs TF2 object detection models.
For local training/evaluation run:
PIPELINE_CONFIG_PATH=path/to/pipeline.config
MODEL_DIR=/tmp/model_outputs
NUM_TRAIN_STEPS=10000
SAMPLE_1_OF_N_EVAL_EXAMPLES=1
python model_main_tf2.py -- \
--model_dir=$MODEL_DIR --num_train_steps=$NUM_TRAIN_STEPS \
--sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \
--pipeline_config_path=$PIPELINE_CONFIG_PATH \
--alsologtostderr
"""
from absl import flags
import tensorflow.compat.v2 as tf
from object_detection import model_lib_v2
import os
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
'file.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_bool('eval_on_train_data', False, 'Enable evaluating on train '
'data (only supported in distributed training).')
flags.DEFINE_integer('sample_1_of_n_eval_examples', None, 'Will sample one of '
'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
'one of every n train input examples for evaluation, '
'where n is provided. This is only used if '
'`eval_training_data` is True.')
flags.DEFINE_string(
'model_dir', None, 'Path to output model directory '
'where event and checkpoint files will be written.')
flags.DEFINE_string(
'checkpoint_dir', None, 'Path to directory holding a checkpoint. If '
'`checkpoint_dir` is provided, this binary operates in eval-only mode, '
'writing resulting metrics to `model_dir`.')
flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an'
'evaluation checkpoint before exiting.')
flags.DEFINE_bool('use_tpu', False, 'Whether the job is executing on a TPU.')
flags.DEFINE_string(
'tpu_name',
default=None,
help='Name of the Cloud TPU for Cluster Resolvers.')
flags.DEFINE_integer(
'num_workers', 1, 'When num_workers > 1, training uses '
'MultiWorkerMirroredStrategy. When num_workers = 1 it uses '
'MirroredStrategy.')
flags.DEFINE_integer(
'checkpoint_every_n', 1000, 'Integer defining how often we checkpoint.')
flags.DEFINE_boolean('record_summaries', True,
('Whether or not to record summaries defined by the model'
' or the training pipeline. This does not impact the'
' summaries of the loss values which are always'
' recorded.'))
FLAGS = flags.FLAGS
def main(unused_argv):
  """Entry point: run continuous evaluation or (distributed) training.

  Eval-only mode is selected by --checkpoint_dir; otherwise a distribution
  strategy is chosen from --use_tpu / --num_workers and the training loop
  runs under its scope.
  """
  flags.mark_flag_as_required('model_dir')
  flags.mark_flag_as_required('pipeline_config_path')
  tf.config.set_soft_device_placement(True)

  if FLAGS.checkpoint_dir:
    # Eval-only: watch checkpoint_dir and write metrics to model_dir.
    model_lib_v2.eval_continuously(
        pipeline_config_path=FLAGS.pipeline_config_path,
        model_dir=FLAGS.model_dir,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples),
        checkpoint_dir=FLAGS.checkpoint_dir,
        wait_interval=300, timeout=FLAGS.eval_timeout)
  else:
    if FLAGS.use_tpu:
      # TPU is automatically inferred if tpu_name is None and
      # we are running under cloud ai-platform.
      resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
          FLAGS.tpu_name)
      tf.config.experimental_connect_to_cluster(resolver)
      tf.tpu.experimental.initialize_tpu_system(resolver)
      strategy = tf.distribute.experimental.TPUStrategy(resolver)
    elif FLAGS.num_workers > 1:
      strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
    else:
      strategy = tf.compat.v2.distribute.MirroredStrategy()

    with strategy.scope():
      model_lib_v2.train_loop(
          pipeline_config_path=FLAGS.pipeline_config_path,
          model_dir=FLAGS.model_dir,
          train_steps=FLAGS.num_train_steps,
          use_tpu=FLAGS.use_tpu,
          checkpoint_every_n=FLAGS.checkpoint_every_n,
          record_summaries=FLAGS.record_summaries)


if __name__ == '__main__':
  # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # uncomment to force CPU-only
  tf.compat.v1.app.run()
|
# -*- coding: utf-8 -*-
"""Adds CreateView and related functionality to SQLAlchemy"""
from sqlalchemy_views import metadata
from sqlalchemy_views.views import CreateView, DropView # noqa
# Re-export package metadata so tools can read version/author off the package.
__version__ = metadata.version
__author__ = metadata.authors[0]
__license__ = metadata.license
__copyright__ = metadata.copyright
|
#!/usr/bin/env python
from setuptools import setup
# Package manifest: same keywords and values as before, one keyword per line.
setup(
    name='generic_celery_task',
    version='0.3',
    description='A workaround for the lack of dynamic tasks in Celery',
    long_description=open("README.rst").read(),
    author='Stefan Talpalaru',
    author_email='stefantalpalaru@yahoo.com',
    url='https://github.com/stefantalpalaru/generic_celery_task',
    license='BSD',
    packages=['generic_celery_task'],
    test_suite='nose.collector',
    install_requires=['celery'],
    tests_require=['nose', 'redis'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: BSD License',
        'Topic :: System :: Distributed Computing',
        'Topic :: Software Development :: Object Brokering',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Operating System :: POSIX',
    ],
)
|
# Adventure 3: buildStreet.py
# From the book: "Adventures in Minecraft", 2nd Edition
# written by David Whale and Martin O'Hanlon, Wiley, 2017
# http://eu.wiley.com/WileyCDA/WileyTitle/productCd-1119439582.html
#
# This program builds a street of identical houses.
# It uses a for loop.
# Import necessary modules
import mcpi.minecraft as minecraft
import mcpi.block as block
# Connect to Minecraft (default server address/port of the mcpi API)
mc = minecraft.Minecraft.create()

# A constant, that sets the size (edge length in blocks) of your house
SIZE = 20
# define a new function, that builds a house
# define a new function, that builds a house
def house():
    """Build one cobblestone house with door, windows, roof and carpet.

    Reads the build origin from the module-level globals ``x``, ``y``, ``z``
    and the edge length from ``SIZE``; it does not modify them (the caller
    advances ``x`` between houses).
    """
    # Calculate the midpoints of the front face of the house
    # NOTE(review): SIZE/2 is a float under Python 3; the book's examples
    # target integer coordinates -- confirm the intended interpreter.
    midx = x+SIZE/2
    midy = y+SIZE/2

    # Build the outer shell of the house
    mc.setBlocks(x, y, z, x+SIZE, y+SIZE, z+SIZE, block.COBBLESTONE.id)
    # Carve the insides out with AIR
    mc.setBlocks(x+1, y, z+1, x+SIZE-1, y+SIZE-1, z+SIZE-1, block.AIR.id)
    # Carve out a space for the doorway
    mc.setBlocks(midx-1, y, z, midx+1, y+3, z, block.AIR.id)
    # Carve out the left hand window
    mc.setBlocks(x+3, y+SIZE-3, z, midx-3, midy+3, z, block.GLASS.id)
    # Carve out the right hand window
    mc.setBlocks(midx+3, y+SIZE-3, z, x+SIZE-3, midy+3, z, block.GLASS.id)
    # Add a wooden roof
    mc.setBlocks(x, y+SIZE, z, x+SIZE, y+SIZE, z+SIZE, block.WOOD.id)
    # Add a woolen carpet, the colour is 14, which is red.
    mc.setBlocks(x+1, y-1, z+1, x+SIZE-2, y-1, z+SIZE-2, block.WOOL.id, 14)
# Get the players position
pos = mc.player.getTilePos()

# Decide where to start building the house, slightly away from player.
# house() reads these module-level globals as its build origin.
x = pos.x + 2
y = pos.y
z = pos.z

# build 5 houses, for a whole street of houses
for h in range(5):
    # build one house
    house()
    # move x by the size of the house just built
    x = x + SIZE

# END
|
#!/usr/bin/python2.7
"""
Read a maf file from stdin and write out a new maf with only blocks having all
of the required in species, after dropping any other species and removing
columns containing only gaps.
usage: %prog species,species2,... < maf
"""
import psyco_full
import bx.align.maf
import copy
import sys
from itertools import *
def main():
    """Filter a MAF stream down to the species listed in argv[1].

    Reads a MAF from stdin, keeps only components whose species prefix is in
    the comma-separated list, removes columns left with only gaps, and writes
    blocks that still align more than one component to stdout.
    """
    wanted = sys.argv[1].split(',')
    reader = bx.align.maf.Reader(sys.stdin)
    writer = bx.align.maf.Writer(sys.stdout)
    for block in reader:
        block.components = [
            comp for comp in block.components
            if comp.src.split('.')[0] in wanted
        ]
        block.remove_all_gap_columns()
        if len(block.components) > 1:
            writer.write(block)
    reader.close()
    writer.close()


if __name__ == "__main__":
    main()
|
from __future__ import print_function
from dace.codegen import cppunparse
import six
def test_py2cpp(func, expected_string):
    """Unparse *func* to C++ and compare against *expected_string*.

    Returns True on a match; otherwise prints a diagnostic and returns False.
    """
    result = cppunparse.py2cpp(func)
    if result == expected_string:
        return True
    print("ERROR in py2cpp, expected:\n%s\n\ngot:\n%s\n" %
          (expected_string, result))
    return False
def test_pyexpr2cpp(func, expected_string):
    """Unparse a Python expression to C++ and compare with the expectation.

    Returns True on a match; otherwise prints a diagnostic and returns False.
    """
    result = cppunparse.pyexpr2cpp(func)
    if result == expected_string:
        return True
    print("ERROR in pyexpr2cpp, expected:\n%s\n\ngot:\n%s\n" %
          (expected_string, result))
    return False
def gfunc(woo):
    # Fixture for the global-function unparsing check below: only its SOURCE
    # is consumed (via the AST), so keep tokens unchanged.  No docstring on
    # purpose -- a docstring would enter the AST and change the expected C++.
    # NOTE(review): with i = 0 the while condition is false on entry, so the
    # loop never runs and the call returns 0; intentional for a fixture.
    i = 0
    result = 0
    while i < woo and i > 0:
        for j in range(i):
            result += (2 // 1)**j
    return result
if __name__ == '__main__':
    print('cppunparse unit test')
    success = True
    # Each expected string below must match the unparser output byte-for-byte.
    # Untyped function -> all-auto C++ signature.
    success &= test_py2cpp(
        """def notype(a, b):
    a = a + 5
    c = a + b
    return c*b
""", """auto notype(auto a, auto b) {
    a = (a + 5);
    auto c = (a + b);
    return (c * b);
}""")
    if six.PY3:
        # Annotated signature -> concrete C++ types.
        success &= test_py2cpp(
            """def typed(a: int, b: float) -> float:
    c = a + b
    return c*b
""", """float typed(int a, float b) {
    auto c = (a + b);
    return (c * b);
}""")

    # Ternary operators, strings
    success &= test_py2cpp("""printf('%f\\n', a if b else c);""",
                           """printf("%f\\n", (b ? a : c));""")

    # Global functions, operators
    success &= test_py2cpp(
        gfunc, """auto gfunc(auto woo) {
    auto i = 0;
    auto result = 0;
    while (((i < woo) && (i > 0))) {
        for (auto j : range(i)) {
            result += dace::math::pow(dace::math::ifloor(2 / 1), j);
        }
    }
    return result;
}""")

    def lfunc():
        exit(1 >> 3)

    # Local functions
    success &= test_py2cpp(lfunc, """auto lfunc() {
    exit((1 >> 3));
}""")

    # void return value
    if six.PY3:
        success &= test_py2cpp("""
def lfunc() -> None:
    exit(1 >> 3)
""", """void lfunc() {
    exit((1 >> 3));
}""")

    # Local variable tracking
    success &= test_py2cpp('l = 1 + a; l = l + 8;', """auto l = (1 + a);
l = (l + 8);""")

    # Operations (augmented assignment)
    if six.PY3:
        success &= test_py2cpp('l *= 3; l //= 8', """l *= 3;
l = dace::math::ifloor(l / 8);""")

    success &= test_pyexpr2cpp('a << 3', '(a << 3)')

    # Array assignment
    success &= test_py2cpp('A[i] = b[j]', """A[i] = b[j];""")

    print('Result: %s' % ('PASSED' if success else 'FAILED'))
    if not success:
        exit(1)
|
from manimlib.imports import *
from accalib.electrical_circuits import BatteryLampCircuit, BatteryLampCircuitAC
from accalib.particles import Electron
from accalib.lines import DottedLine
from accalib.tools import rule_of_thirds_guide
class IntroPhasorsPart(Scene):
    """Title card for Part 4: writes the two-line section heading."""

    def construct(self):
        section_label = TextMobject(
            "Part 4: \\\\",
            "Imaginary Voltage?"
        ).scale(1.5)

        # Write the "Part 4:" line first, pause, then the subtitle.
        self.play(
            Write(section_label[0])
        )
        self.wait()
        self.play(
            Write(section_label[1])
        )
        self.wait(1)
class ImaginaryVoltageCircuit(Scene):
    """Poses the question: what would an *imaginary* voltage mean?

    Two battery/lamp circuits sit under the Ohm's-law equation I = V/R.
    The right-hand circuit's 24 V source is replaced by a 12j V source;
    applying Ohm's law then yields the 'imaginary current' 2j A, which is
    finally re-expressed as (imaginary) electrons per second.
    """

    CONFIG = {
        "current_color": GREEN_D,  # GREEN_C
        "voltage_color": RED_D,  # RED_A,RED_B,
        "resistance_color": ORANGE,
        "circuit_scale": 0.76
    }

    def construct(self):
        # add equation
        color_map = {
            "V": self.voltage_color,
            "I": self.current_color,
            "R": self.resistance_color
        }
        # FIX: the tex string used to be "I = {V\\overR}".  TeX tokenizes
        # "\overR" as a single, undefined control word, so the fraction
        # never compiled; "\over" must be delimited from the following "R".
        # The tex_to_color_map keys still split the string into the same
        # six pieces ("I", " = {", "V", fraction bar, "R", "}") so the
        # equation[...] indexing below is unchanged.
        equation = TexMobject(
            "I = {V \\over R}",
            tex_to_color_map=color_map
        ) \
            .scale(3) \
            .to_edge(UP)\
            .shift(1*LEFT)
        # Fade in "V" first, then the rest of the equation.
        self.play(
            FadeIn(equation[2])
        )
        self.wait(4.22)
        self.play(
            FadeIn(equation[:2]),
            FadeIn(equation[3:])
        )
        self.wait(2.04)

        # add first circuit (the ordinary 12 V / 6 ohm / 2 A circuit)
        circuit1 = BatteryLampCircuit() \
            .scale(self.circuit_scale) \
            .to_corner(DL, buff=0) \
            .shift(2.5*RIGHT+1*UP)
        circuit1.setup_electrons()
        # Opaque rectangle hiding the circuit until it is faded out below.
        cover_rect1 = SurroundingRectangle(
            circuit1,
            fill_opacity=1,
            fill_color=BLACK,
            stroke_opacity=0
        )

        # setup circuit labels: orient the current arrow along the wire by
        # sampling two nearby points of the electron path interpolator
        point1 = circuit1.electron_vect_inter.interpolate(0.55)
        point2 = circuit1.electron_vect_inter.interpolate(0.5)
        angle = np.arccos((point2[0] - point1[0]) / np.linalg.norm(point2 - point1))
        current_arrow_1 = ArrowTip(
            start_angle=-1 * angle,
            color=self.current_color
        ) \
            .scale(2.5) \
            .move_to(point1 + 0.05 * UR)
        i_label1, i_value1 = self.get_label("I", 2, "A", color=self.current_color)
        i_label1.next_to(current_arrow_1, direction=UR, buff=0) \
            .shift(0 * RIGHT)

        # write voltage of circuit
        v_label1, v_value1 = self.get_label("V", 12, "V", color=self.voltage_color)
        v_label1.next_to(circuit1.battery, direction=UL, buff=0) \
            .shift(0.25 * UP + 0.4 * RIGHT)

        # write resistance of circuit
        r_label1, r_value1 = self.get_label("R", 6, "\\Omega", color=self.resistance_color)
        r_label1.next_to(circuit1.light_bulb, direction=DOWN, buff=0.2) \
            .shift(0 * RIGHT)
        self.add(circuit1, r_label1, cover_rect1)

        # add second circuit (will later get the imaginary 12j V source)
        circuit2 = BatteryLampCircuit(
            electron_freq=0.33
        ) \
            .scale(self.circuit_scale) \
            .to_corner(DR, buff=0) \
            .shift(0.8 * LEFT + 1 * UP)
        circuit2.setup_electrons()
        cover_rect2 = SurroundingRectangle(
            circuit2,
            fill_opacity=1,
            fill_color=BLACK,
            stroke_opacity=0
        )

        # setup circuit labels (same arrow-orientation trick as circuit1)
        point1 = circuit2.electron_vect_inter.interpolate(0.55)
        point2 = circuit2.electron_vect_inter.interpolate(0.5)
        angle = np.arccos((point2[0] - point1[0]) / np.linalg.norm(point2 - point1))
        current_arrow_2 = ArrowTip(
            start_angle=-1 * angle,
            color=self.current_color
        ) \
            .scale(2.5) \
            .move_to(point1 + 0.05 * UR)
        i_label2, i_value2 = self.get_label("I", 4, "A", color=self.current_color)
        i_label2.next_to(current_arrow_2, direction=UR, buff=0) \
            .shift(0 * RIGHT)

        # write voltage of circuit
        v_label2, v_value2 = self.get_label("V", 24, "V", color=self.voltage_color)
        v_label2.next_to(circuit2.battery, direction=UL, buff=0) \
            .shift(0.25 * UP)

        # write resistance of circuit
        r_label2, r_value2 = self.get_label("R", 6, "\\Omega", color=self.resistance_color)
        r_label2.next_to(circuit2.light_bulb, direction=DOWN, buff=0.2) \
            .shift(0 * RIGHT)
        self.add(circuit2, r_label2, cover_rect2)

        # reveal both circuits while the electrons keep moving
        self.play(
            FadeOut(cover_rect2),
            FadeOut(cover_rect1),
            circuit1.get_electron_anim(1.04),
            circuit2.get_electron_anim(1.04),
        )

        # FadeIn voltages
        self.play(
            FadeIn(v_label1),
            circuit1.get_electron_anim(2),
            circuit2.get_electron_anim(2),
        )
        self.play(
            FadeIn(v_label2),
            circuit1.get_electron_anim(3.22),
            circuit2.get_electron_anim(3.22),
        )

        # FadeIn currents
        self.play(
            FadeInFrom(i_label1, direction=UP),
            FadeInFrom(current_arrow_1, direction=UP),
            FadeInFrom(i_label2, direction=UP),
            FadeInFrom(current_arrow_2, direction=UP),
            circuit1.get_electron_anim(1),
            circuit2.get_electron_anim(1),
        )

        # label currents with highlight rectangles
        in_kw = {
            'stroke_opacity': 1,
            'stroke_color': YELLOW,
            'fill_opacity': 0.2,
            'fill_color': YELLOW
        }
        current_rects = VGroup(
            SurroundingRectangle(i_label1, **in_kw),
            SurroundingRectangle(i_label2, **in_kw),
        )
        self.play(
            FadeIn(current_rects),
            circuit1.get_electron_anim(2),
            circuit2.get_electron_anim(2),
        )

        # fadeout current labels
        self.play(
            FadeOut(current_rects, run_time=0.87),
            circuit1.get_electron_anim(0.87),
            circuit2.get_electron_anim(0.87),
        )

        # replace 24 volts with 12 j volts; the current becomes unknown
        new_voltage = TexMobject(
            "12\\textbf{j} V",
            color=self.voltage_color,
            tex_to_color_map={
                "j": PURPLE
            }
        )\
            .scale(1.25)\
            .move_to(v_label2[1].get_center())
        # manual kerning of the individual glyphs
        new_voltage.submobjects[0].shift(0.4*RIGHT+0.05*DOWN)
        new_voltage.submobjects[1].shift(0.25*RIGHT+0.05*DOWN)
        new_voltage.submobjects[2].shift(0.05*DOWN)
        # stop the DecimalNumber tracking its ValueTracker before morphing it
        v_label2[1].clear_updaters()
        unknown_current = TextMobject(
            "???A",
            color=self.current_color
        )\
            .scale(1.25)\
            .move_to(i_label2[1].get_center() + 0.2*RIGHT)
        i_label2[1].clear_updaters()
        self.play(
            Transform(
                v_label2[1], new_voltage
            ),
            Transform(
                i_label2[1], unknown_current
            ),
            circuit1.get_electron_anim(2),
            circuit2.get_electron_anim(2),
        )

        # show j definition
        j_text = TexMobject(
            "\\textbf{j}", "=\\sqrt{-1}",
            tex_to_color_map={
                "\\textbf{j}": PURPLE
            }
        ) \
            .scale(1.4) \
            .to_edge(UL) \
            .shift(0.75 * RIGHT + 1.5*DOWN)
        j_rect = SurroundingRectangle(
            j_text,
            buff=0.5,
            color=YELLOW
        )
        self.play(
            Write(j_text),
            Write(j_rect),
            circuit1.get_electron_anim(4.3),
            circuit2.get_electron_anim(4.3),
        )

        # transform ??? A to (12 j) / 6
        rhs_complex = TexMobject(
            "{12 \\textbf{j} \\over 6}",
            color=self.current_color,
            tex_to_color_map={
                "12": self.voltage_color,
                "\\textbf{j}": PURPLE,
                "6": self.resistance_color
            }
        )\
            .scale(1.25)\
            .move_to(i_label2[1].get_center() + 0.1*LEFT)
        self.play(
            Transform(
                i_label2[1], rhs_complex
            ),
            circuit1.get_electron_anim(2),
            circuit2.get_electron_anim(2),
        )

        # transform (12 j) / 6 to 2 j A
        final_current = TexMobject(
            "2 \\textbf{j} A",
            color=self.current_color,
            tex_to_color_map={
                "\\textbf{j}": PURPLE
            }
        ) \
            .scale(1.25) \
            .move_to(i_label2[1].get_center() + 0.1 * LEFT)
        final_current.submobjects[1].shift(0.05*RIGHT)
        final_current.submobjects[2].shift(0.1*RIGHT)
        self.play(
            Transform(
                i_label2[1], final_current
            ),
            circuit1.get_electron_anim(3.04),
            circuit2.get_electron_anim(3.04),
        )

        # note the imaginary current
        imag_current = TextMobject(
            "Imaginary Current???",
            color=YELLOW
        )\
            .scale(1.25)\
            .to_corner(UR, buff=0)\
            .shift(3.2*DOWN+0.1*LEFT)
        imag_current_arrow = Arrow(
            imag_current.get_bottom(),
            final_current.get_top(),
            color=YELLOW
        )
        self.play(
            ShowCreation(imag_current_arrow),
            Write(imag_current),
            circuit1.get_electron_anim(6.17),
            circuit2.get_electron_anim(6.17),
        )

        # show electrons per second
        # NOTE(review): 6.246e15 electrons/s corresponds to ~1 mA, not 2 A;
        # presumably an illustrative figure -- confirm intended scale.
        elec_per_sec = TexMobject(
            "6246000000000000\\textbf{j}",
            color=self.current_color,
            tex_to_color_map={
                "\\textbf{j}": PURPLE
            }
        ) \
            .scale(1.3) \
            .move_to(imag_current)\
            .shift(1.25*LEFT)
        elec_per_sec_unit_tex = TexMobject(
            "{1", "\\over", "\\text{second}}"
        ) \
            .scale(1) \
            .next_to(elec_per_sec, direction=RIGHT)
        # draw an electron glyph over the "1" in "1/second"
        elec_per_sec_unit_elec = Electron() \
            .scale(0.3) \
            .move_to(elec_per_sec_unit_tex[0])
        elec_per_sec_unit = VGroup(
            elec_per_sec_unit_tex, elec_per_sec_unit_elec
        )
        self.play(
            Transform(
                imag_current,
                VGroup(elec_per_sec, elec_per_sec_unit)
            ),
            circuit1.get_electron_anim(16.96),
            circuit2.get_electron_anim(16.96),
        )

    def get_label(self, text, initial_value, unit, **kwargs):
        """Build a '<text> = <number><unit>' label.

        Returns a tuple (VGroup(lhs, decimal_number), value_tracker); the
        DecimalNumber follows the returned ValueTracker via an updater, so
        callers can animate the displayed value by animating the tracker.
        """
        lhs = TextMobject(text, "=", **kwargs)\
            .scale(1.25)
        decimal_num = DecimalNumber(
            initial_value,
            num_decimal_places=0,
            unit=unit,
            **kwargs
        ) \
            .scale(1.25)\
            .next_to(lhs, direction=RIGHT)
        value = ValueTracker(initial_value)
        decimal_num.add_updater(
            lambda x: x.set_value(value.get_value())
        )
        return VGroup(lhs, decimal_num), value
class ACvsDC(Scene):
    """Split-screen comparison of direct and alternating current.

    The left half shows a DC battery/lamp circuit with a constant current
    readout; the right half shows an AC circuit whose current readout and
    electrons-per-second counter follow sin(phase) of an animated tracker.
    """

    CONFIG = {
        "current_color": GREEN_D,  # GREEN_C
        "voltage_color": RED_D,  # RED_A,RED_B,
        "resistance_color": ORANGE,
        "electron_freq_0": 0.11,   # DC circuit electron speed
        "electron_freq_1": 0.5,    # unused in this scene (kept for subclasses)
        "ac_electron_freq": 2,     # AC circuit electron oscillation frequency
        "circuit_scale": 0.95,
        "eps_scale": 1.3           # scale of the electrons-per-second counters
    }

    def construct(self):
        """Build both titled circuit panels and run the electron animations."""
        # write dc
        dc_title = TextMobject("Direct Current(", "DC", ")") \
            .scale(1.25)
        # center of the left half, just below the top edge
        dc_title.move_to(
            FRAME_WIDTH * 0.25 * LEFT + FRAME_HEIGHT * 0.5 * UP + dc_title.get_height() * 0.5 * DOWN + 0.2 * DOWN)
        dc_underline = Line(LEFT, RIGHT) \
            .match_width(dc_title) \
            .scale(1) \
            .next_to(dc_title, DOWN, SMALL_BUFF)
        # only the "DC" abbreviation is written first; the full words follow later
        self.play(
            Write(dc_title[1], run_time=0.5),
        )
        # write ac
        ac_title = TextMobject("Alternating Current(", "AC", ")") \
            .scale(1.25)
        ac_title.move_to(
            FRAME_WIDTH * 0.25 * RIGHT + FRAME_HEIGHT * 0.5 * UP + ac_title.get_height() * 0.5 * DOWN + 0.2 * DOWN)
        ac_underline = Line(LEFT, RIGHT) \
            .match_width(ac_title) \
            .scale(1) \
            .next_to(ac_title, DOWN, SMALL_BUFF)
        self.play(
            Write(ac_title[1], run_time=0.5)
        )
        self.wait(2.08)
        # show direct current
        self.play(
            Write(VGroup(dc_title[0], dc_title[2])),
            ShowCreation(dc_underline)
        )
        # fade in dc circuit
        dc_circuit = BatteryLampCircuit(
            electron_freq=self.electron_freq_0
        ) \
            .scale(self.circuit_scale) \
            .to_corner(UL, buff=0) \
            .shift(2.5 * DOWN + 0.2 * RIGHT)
        dc_circuit.setup_electrons()
        # opaque rectangle hiding the circuit until it is faded out below
        block_rect = Rectangle(
            fill_opacity=1,
            fill_color=BLACK,
            stroke_opacity=0
        ) \
            .set_width(dc_circuit.get_width() * 1.3, stretch=True) \
            .set_height(dc_circuit.get_height() * 0.8, stretch=True) \
            .move_to(dc_circuit.get_center())
        # show current label: orient the arrow along the wire by sampling
        # two nearby points of the electron path interpolator
        point1 = dc_circuit.electron_vect_inter.interpolate(0.55)
        point2 = dc_circuit.electron_vect_inter.interpolate(0.5)
        angle = np.arccos((point2[0] - point1[0]) / np.linalg.norm(point2 - point1))
        current_arrow = ArrowTip(
            start_angle=-1 * angle,
            color=self.current_color
        ) \
            .scale(2.5) \
            .move_to(point1 + 0.05 * UR)
        current_text = TextMobject(
            "current", "=",
            color=self.current_color) \
            .next_to(current_arrow, direction=UR) \
            .shift(0 * RIGHT) \
            .scale(1.5)
        current_value = DecimalNumber(
            1,
            unit="A",
            color=self.current_color,
            num_decimal_places=2
        ) \
            .scale(1.5) \
            .next_to(current_text, direction=RIGHT, buff=0.3)
        # DC readout follows a constant tracker (stays at 1 A in this scene)
        current_tracker = ValueTracker(1)
        current_value.add_updater(
            lambda x:
            x.set_value(current_tracker.get_value())
        )
        self.add(dc_circuit, current_arrow, current_text, current_value, block_rect)
        # label equivalent electrons per second
        # NOTE(review): 6.246e15 electrons/s corresponds to ~1 mA rather than
        # the displayed 1 A; presumably an illustrative figure -- confirm.
        elec_per_sec = DecimalNumber(
            6246000000000000,
            num_decimal_places=0,
            color=self.current_color,
            edge_to_fix=RIGHT
        ) \
            .scale(self.eps_scale) \
            .to_corner(DL) \
            .shift(0.3 * LEFT)
        elec_per_sec_tracker = ValueTracker(6246000000000000)
        elec_per_sec.add_updater(
            lambda x: x.set_value(elec_per_sec_tracker.get_value())
        )
        elec_per_sec_unit_tex = TexMobject(
            "{1", "\\over", "\\text{second}}"
        ) \
            .scale(1.15) \
            .next_to(elec_per_sec, direction=RIGHT)
        # electron glyph drawn over the "1" of "1/second"
        elec_per_sec_unit_elec = Electron() \
            .scale(0.3) \
            .move_to(elec_per_sec_unit_tex[0])
        elec_per_sec_unit = VGroup(
            elec_per_sec_unit_tex, elec_per_sec_unit_elec
        )
        # get_electron_anim(t) comes from accalib.electrical_circuits and
        # appears to return a t-second electron-flow animation.
        self.play(
            FadeOut(block_rect),
            FadeIn(elec_per_sec),
            FadeIn(elec_per_sec_unit),
            dc_circuit.get_electron_anim(5)
        )
        # add dividing line
        dividing_line = DashedLine(
            start=FRAME_HEIGHT * 0.5 * DOWN,
            end=FRAME_HEIGHT * 0.5 * UP,
            dash_length=0.25
        )
        self.play(
            ShowCreation(dividing_line),
            dc_circuit.get_electron_anim(2.87)
        )
        # show alternating current
        self.play(
            Write(VGroup(ac_title[0], ac_title[2])),
            ShowCreation(ac_underline),
            dc_circuit.get_electron_anim(1)
        )
        # fade in ac circuit
        ac_circuit = BatteryLampCircuitAC(
            electron_freq=self.ac_electron_freq
        ) \
            .scale(self.circuit_scale) \
            .to_corner(UR, buff=0) \
            .shift(2.5 * DOWN + 1.0 * LEFT)
        ac_circuit.setup_electrons()
        block_rect_ac = Rectangle(
            fill_opacity=1,
            fill_color=BLACK,
            stroke_opacity=0,
            width=7.7,
            height=6
        ) \
            .move_to(ac_circuit.get_center())
        # show current label (same arrow-orientation trick as the DC side)
        point1 = ac_circuit.electron_vect_inter.interpolate(0.55)
        point2 = ac_circuit.electron_vect_inter.interpolate(0.5)
        angle = np.arccos((point2[0] - point1[0]) / np.linalg.norm(point2 - point1))
        current_arrow_ac = ArrowTip(
            start_angle=-1 * angle,
            color=self.current_color
        ) \
            .scale(2.5) \
            .move_to(point1 + 0.05 * UR)
        current_text_ac = TextMobject(
            "current", "=",
            color=self.current_color) \
            .next_to(current_arrow_ac, direction=UR) \
            .shift(0.5 * LEFT) \
            .scale(1.5)
        current_value_ac = DecimalNumber(
            1,
            unit="A",
            color=self.current_color,
            num_decimal_places=2
        ) \
            .scale(1.5) \
            .next_to(current_text_ac, direction=RIGHT, buff=0.3)
        # AC readout is sin(phase); the phase tracker is ramped linearly below
        phase_tracker_ac = ValueTracker(0)
        current_value_ac.add_updater(
            lambda x:
            x.set_value(np.sin(phase_tracker_ac.get_value()))
        )
        self.add(ac_circuit, current_arrow_ac, current_text_ac, current_value_ac, block_rect_ac)
        # label equivalent electrons per second
        elec_per_sec_ac = DecimalNumber(
            6246000000000000,
            num_decimal_places=0,
            color=self.current_color,
            edge_to_fix=RIGHT
        ) \
            .scale(self.eps_scale) \
            .to_corner(DR) \
            .shift(1.5 * LEFT)
        # counter oscillates with the same phase as the current readout
        elec_per_sec_ac.add_updater(
            lambda x: x.set_value(6246000000000000 * np.sin(phase_tracker_ac.get_value()))
        )
        elec_per_sec_unit_tex_ac = TexMobject(
            "{1", "\\over", "\\text{second}}"
        ) \
            .scale(1.15) \
            .next_to(elec_per_sec_ac, direction=RIGHT)
        elec_per_sec_unit_elec_ac = Electron() \
            .scale(0.3) \
            .move_to(elec_per_sec_unit_tex_ac[0])
        elec_per_sec_unit_ac = VGroup(
            elec_per_sec_unit_tex_ac, elec_per_sec_unit_elec_ac
        )
        # second cover rectangle hides the AC counter until the final reveal
        block_rect_2 = Rectangle(
            fill_opacity=1,
            fill_color=BLACK,
            stroke_opacity=0,
            width=8.5,
            height=3
        ) \
            .move_to(VGroup(elec_per_sec_ac, elec_per_sec_unit_ac).get_center())
        self.add(elec_per_sec_ac, elec_per_sec_unit_ac, block_rect_2)
        # reveal everything while ramping the AC phase at the electron frequency
        self.play(
            FadeOut(block_rect_2),
            FadeOut(block_rect_ac),
            ac_circuit.get_electron_anim(7.57),
            dc_circuit.get_electron_anim(7.57),
            ApplyMethod(
                phase_tracker_ac.increment_value,
                7.57 * self.ac_electron_freq,
                run_time=7.57,
                rate_func=linear
            ),
        )
class ACDCApplications(Scene):
    """Side-by-side collage of current types in the real world: devices
    that run on DC (phone, car) on the left, AC infrastructure (power
    lines, wall outlet) on the right."""

    CONFIG = {
        "circuit_color": BLUE_C
    }

    def construct(self):
        # add line dividing screen
        dividing_line = DashedLine(
            start=FRAME_HEIGHT * 0.5 * DOWN,
            end=FRAME_HEIGHT * 0.5 * UP,
            dash_length=0.25
        )
        self.add(dividing_line)
        # dc title
        dc_title = TextMobject("Direct Current", "(DC)")\
            .scale(1.25)
        dc_title.move_to(
            FRAME_WIDTH * 0.25 * LEFT + FRAME_HEIGHT * 0.5 * UP + dc_title.get_height() * 0.5 * DOWN + 0.2 * DOWN)
        dc_underline = Line(LEFT, RIGHT) \
            .match_width(dc_title) \
            .scale(1) \
            .next_to(dc_title, DOWN, SMALL_BUFF)
        self.add(dc_title, dc_underline)
        # ac title
        ac_title = TextMobject("Alternating Current", "(AC)")\
            .scale(1.25)
        ac_title.move_to(
            FRAME_WIDTH * 0.25 * RIGHT + FRAME_HEIGHT * 0.5 * UP + ac_title.get_height() * 0.5 * DOWN + 0.2 * DOWN)
        ac_underline = Line(LEFT, RIGHT) \
            .match_width(ac_title) \
            .scale(1) \
            .next_to(ac_title, DOWN, SMALL_BUFF)
        self.add(ac_title, ac_underline)
        self.wait(4.96)
        # phone
        # FIX: these image paths used bare backslashes ("images\ep1\..."),
        # which only worked because Python leaves unrecognized escape
        # sequences alone (a SyntaxWarning in modern CPython) and which
        # break on non-Windows systems.  Forward slashes are portable.
        phone = ImageMobject(
            "images/ep1/IntroduceACDC/cell-phone.png"
        )\
            .scale(2.2)\
            .to_edge(LEFT, buff=-0.6)\
            .shift(1*UP)
        self.play(
            FadeIn(phone)
        )
        # car (negative buff overlaps it slightly with the phone image)
        car = ImageMobject(
            "images/ep1/IntroduceACDC/car.png"
        )\
            .scale(1.5)\
            .next_to(phone, direction=RIGHT, buff=-1.5)
        self.play(
            FadeIn(car)
        )
        self.wait(5.9)
        # power lines
        power_line = ImageMobject(
            "images/ep1/IntroduceACDC/power_line5.jpg"
        )\
            .scale(2.7)\
            .to_edge(RIGHT, buff=0.5)\
            .shift(1*UP)
        self.play(
            FadeIn(power_line)
        )
        self.wait(3.06)
        # outlet
        outlet = ImageMobject(
            "images/ep1/IntroduceACDC/outlet-US.jpg"
        ) \
            .scale(2)\
            .next_to(power_line, direction=LEFT, buff=0.5)
        self.play(
            FadeIn(outlet)
        )
        self.wait(4)
class SineWaveCharacteristics(ACvsDC):
    """Walks through the three parameters of a sine wave -- amplitude,
    angular frequency, and phase -- by animating an AC circuit next to a
    live-drawn I(t) graph, then varying A, omega, and phi one at a time.

    State shared between construct() and the get_*_anim helpers:
    self.ac_circuit, self.time_axes, self.graph, self.block_rect,
    self.ampl_labels, and the trackers self.amplitude_value,
    self.freq_value, self.phase_value.
    """

    CONFIG = {
        "axes_config": {
            "number_line_config": {
                "include_tip": False,
            },
            "x_axis_config": {
                "color": BLUE_C,
            },
            "y_axis_config": {
                "color": BLUE_C,
            },
            "x_min": 0,
            "x_max": 7,
            "y_min": -2.5,
            "y_max": 2.5,
            "center_point": RIGHT_SIDE + 7 * LEFT + 0.5 * UP,
        },
        "ac_electron_freq": 1,
        "amplitude_color": ORANGE,
        "ang_freq_color": BLUE_C,
        "phase_color": RED_B,
        # baseline circuit settings; the get_*_anim helpers scale these
        "base_electron_freq": 1,
        "base_electron_amplitude": 0.15
    }

    def construct(self):
        """Draw the circuit and sine graph, then demo A, omega, and phi."""
        self.add(
            Rectangle(
                width=FRAME_WIDTH,
                height=FRAME_HEIGHT,
                color=PURPLE
            )
        )
        # fade in ac circuit
        self.ac_circuit = BatteryLampCircuitAC(
            electron_freq=self.base_electron_freq,
            electron_amplitude=self.base_electron_amplitude
        ) \
            .scale(1.0) \
            .to_edge(LEFT, buff=0.1)
        # trackers mirroring the current A, omega, phi of the displayed wave
        self.amplitude_value = ValueTracker(1)
        self.freq_value = ValueTracker(1)
        self.phase_value = ValueTracker(0)
        self.ac_circuit.setup_electrons()
        # orient the current arrow along the wire by sampling two nearby
        # points of the electron path interpolator
        point1 = self.ac_circuit.electron_vect_inter.interpolate(0.55)
        point2 = self.ac_circuit.electron_vect_inter.interpolate(0.5)
        angle = np.arccos((point2[0] - point1[0]) / np.linalg.norm(point2 - point1))
        current_arrow_ac = ArrowTip(
            start_angle=-1 * angle,
            color=self.current_color
        ) \
            .scale(2.5) \
            .move_to(point1 + 0.05 * UR)
        current_text_ac = TextMobject(
            "current", "=",
            color=self.current_color) \
            .next_to(current_arrow_ac, direction=UR) \
            .shift(0.5 * LEFT) \
            .scale(1.5)
        current_value_ac = DecimalNumber(
            1,
            unit="A",
            color=self.current_color,
            num_decimal_places=2
        ) \
            .scale(1.5) \
            .next_to(current_text_ac, direction=RIGHT, buff=0.3)
        # readout normalizes the circuit's instantaneous current so the
        # base amplitude displays as 1 A
        current_value_ac.add_updater(
            lambda x:
            x.set_value((1/self.base_electron_amplitude) * self.ac_circuit.get_instantaneous_current())
        )
        self.add(self.ac_circuit, current_arrow_ac, current_text_ac, current_value_ac)
        self.play(
            self.ac_circuit.get_electron_anim(2*PI-1)
        )
        # add axis
        self.time_axes = Axes(**self.axes_config)
        y_label = self.time_axes.get_y_axis_label("\\text{I(t)}")\
            .shift(0.5 * UP)\
            .scale(1.3)\
            .set_color(self.current_color)
        time_label = self.time_axes.get_x_axis_label("\\text{time}").set_color(BLUE_C)
        self.add(self.time_axes, time_label, y_label)
        self.play(
            FadeIn(self.time_axes),
            FadeIn(y_label),
            FadeIn(time_label),
            self.ac_circuit.get_electron_anim(1)
        )
        #
        # phase must be multiple of 2 pi at this point
        #
        # draw sine wave: one short graph is animated into existence, then
        # swapped for a longer static graph the later anims can reshape
        graph_animated = self.time_axes.get_graph(
            np.sin,
        ).set_color(self.current_color)
        self.graph = self.time_axes.get_graph(
            np.sin,
            x_max=20
        ).set_color(self.current_color)
        draw_line = Line(ORIGIN, ORIGIN, color=YELLOW, stroke_width=4)
        draw_dot = Dot(ORIGIN, color=YELLOW)
        self.ac_circuit.electron_time.set_value(0)
        def line_update(line):
            # vertical line from the time axis up to the instantaneous current
            # (the 0.00001*UP keeps start and end from coinciding at zero)
            rvec = self.ac_circuit.electron_time.get_value()*RIGHT
            uvec = (self.ac_circuit.get_instantaneous_current()/self.base_electron_amplitude)*UP
            start = self.time_axes.center_point + rvec
            end = self.time_axes.center_point + uvec + rvec + 0.00001*UP
            line.put_start_and_end_on(start, end)
        def dot_update(dot):
            # dot rides the tip of the drawn curve
            rvec = self.ac_circuit.electron_time.get_value() * RIGHT
            uvec = (self.ac_circuit.get_instantaneous_current() / self.base_electron_amplitude) * UP
            loc = self.time_axes.center_point + uvec + rvec + 0.00001 * UP
            dot.move_to(loc)
        self.play(
            UpdateFromFunc(draw_line, line_update),
            UpdateFromFunc(draw_dot, dot_update),
            ShowCreation(
                graph_animated,
                run_time=7,
                rate_func=linear
            ),
            self.ac_circuit.get_electron_anim(8)
        )
        self.remove(graph_animated)
        self.add(self.graph)
        # add sine wave equation
        equation = TexMobject(
            "I(t) = sin(t)",
            color=self.current_color,
        )\
            .scale(1.75)\
            .to_edge(DOWN, buff=0.1)
        self.play(
            FadeInFrom(equation, direction=DOWN),
            self.ac_circuit.get_electron_anim(8.48)
        )
        # show different sine waves
        self.play(
            self.get_amplitude_anim(2, 0.5),
        )
        self.play(
            self.get_freq_anim(2, 0.5),
        )
        self.play(
            self.get_amplitude_anim(0.25, 0.5),
        )
        self.play(
            self.get_freq_anim(0.7, 0.5),
        )
        #set amplitude to 1
        self.play(
            self.get_amplitude_anim(1, 0.5),
        )
        # set freq to 1
        self.play(
            self.get_freq_anim(1, 0.5),
        )
        self.play(
            self.ac_circuit.get_electron_anim(4.47),
        )
        # transform to general sine wave formula
        equation_general = TexMobject(
            "I(t) = A \\hspace{1mm} sin( \\hspace{1mm} \\omega \\hspace{1mm} t \\hspace{1mm} + \\hspace{1mm} \\phi \\hspace{1mm})",
            color=self.current_color,
            tex_to_color_map={
                "A": self.amplitude_color,
                "\\omega": self.ang_freq_color,
                "\\phi": self.phase_color
            },
            substring_to_isolate=[
                "A",
                "\\omega",
                "\\phi"
            ]
        ) \
            .scale(1.75) \
            .to_edge(DOWN, buff=0.1)
        self.play(
            Transform(equation, equation_general),
            self.ac_circuit.get_electron_anim(2.65)
        )
        # add rectangles around parameters
        rects_kw={
            "buff": 0.15,
            "color": YELLOW,
            "stroke_width": 5
        }
        params = VGroup(
            equation_general.get_part_by_tex("A"),
            equation_general.get_part_by_tex("\\omega"),
            equation_general.get_part_by_tex("\\phi"),
        )
        # rects[-1].shift(0.1*UP)
        self.play(
            *[
                ShowCreationThenDestructionAround(rect)
                for rect in params
            ],
            self.ac_circuit.get_electron_anim(3.39)
        )
        # show amplitude braces with definition
        brace_ampl = Brace(
            equation_general.get_part_by_tex("A"),
            direction=UP,
            color=self.amplitude_color
        )
        brace_ampl_def = brace_ampl.get_text("Amplitude")\
            .scale(1.5)\
            .set_color(self.amplitude_color)
        brace_ampl_val = DecimalNumber(
            1,
            color=self.amplitude_color,
        )\
            .scale(1.5)\
            .move_to(brace_ampl_def.get_center())
        self.play(
            ShowCreation(brace_ampl),
            Write(brace_ampl_def),
            self.ac_circuit.get_electron_anim(6.13)
        )
        # convert amplitude to 1
        self.play(
            Transform(brace_ampl_def, brace_ampl_val),
            self.ac_circuit.get_electron_anim(3)
        )
        brace_ampl_val.add_updater(
            lambda x: x.set_value(self.amplitude_value.get_value())
        )
        # second call to add for updater
        self.remove(brace_ampl_def)
        self.add(brace_ampl_val)
        # show dashed line showing amplitude
        ampl_kw = {
            "color": self.amplitude_color,
            "dash_length": 0.05
        }
        def up_ampl_updator(line):
            # upper envelope line at y = +A
            line.put_start_and_end_on(
                self.time_axes.center_point + UP * self.amplitude_value.get_value(),
                self.time_axes.center_point + UP * self.amplitude_value.get_value() + 8 * RIGHT
            )
        def down_ampl_updator(line):
            # lower envelope line at y = -A
            line.put_start_and_end_on(
                self.time_axes.center_point + DOWN * self.amplitude_value.get_value(),
                self.time_axes.center_point + DOWN * self.amplitude_value.get_value() + 8 * RIGHT
            )
        ampl_lines = VGroup(
            DashedLine(**ampl_kw).add_updater(up_ampl_updator, call_updater=True),
            DashedLine(**ampl_kw).add_updater(down_ampl_updator, call_updater=True)
        )
        def up_ampl_label_updator(label):
            label.next_to(
                self.time_axes.center_point + UP * self.amplitude_value.get_value(),
                direction=LEFT
            )
            label.set_value(self.amplitude_value.get_value())
        def down_ampl_label_updator(label):
            label.next_to(
                self.time_axes.center_point + DOWN * self.amplitude_value.get_value(),
                direction=LEFT
            )
            label.set_value(-1 * self.amplitude_value.get_value())
        self.ampl_labels = VGroup(
            DecimalNumber(**ampl_kw).add_updater(up_ampl_label_updator, call_updater=True),
            DecimalNumber(**ampl_kw).add_updater(down_ampl_label_updator, call_updater=True),
        )
        self.play(
            AnimationGroup(
                ShowCreation(ampl_lines),
                FadeInFrom(self.ampl_labels, direction=LEFT),
                lag_ratio=0.8
            ),
            self.ac_circuit.get_electron_anim(4.52)
        )
        # set amplitude to 2 A
        self.play(
            self.get_amplitude_anim(2),
            self.ac_circuit.get_electron_anim(5)
        )
        # set amplitude to 0.25 A
        self.play(
            self.get_amplitude_anim(0.25),
            self.ac_circuit.get_electron_anim(3.57)
        )
        # set amplitude to 1 A
        self.play(
            self.get_amplitude_anim(1),
            self.ac_circuit.get_electron_anim(1.83)
        )
        # show freq braces with definition
        brace_freq = Brace(
            equation_general.get_part_by_tex("\\omega"),
            direction=UP,
            color=self.ang_freq_color
        )
        brace_freq_def = brace_freq.get_text("Angular Frequency") \
            .scale(1.5) \
            .set_color(self.ang_freq_color)
        brace_freq_val = DecimalNumber(
            1,
            color=self.ang_freq_color,
        ) \
            .scale(1.5) \
            .move_to(brace_freq_def.get_center())
        brace_freq_def.shift(2*RIGHT)
        self.play(
            ShowCreation(brace_freq),
            Write(brace_freq_def),
            self.ac_circuit.get_electron_anim(8.13)
        )
        # convert freq to 1
        self.play(
            Transform(brace_freq_def, brace_freq_val),
            self.ac_circuit.get_electron_anim(3)
        )
        brace_freq_val.add_updater(
            lambda x: x.set_value(self.freq_value.get_value())
        )
        # second call to add for updater
        self.remove(brace_freq_def)
        self.add(brace_freq_val)
        # set freq to 2 A
        self.play(
            self.get_freq_anim(2),
            self.ac_circuit.get_electron_anim(5)
        )
        # set freq to 0.5 A
        self.play(
            self.get_freq_anim(0.5),
            self.ac_circuit.get_electron_anim(5)
        )
        # set freq to 1 A
        self.play(
            self.get_freq_anim(1),
            self.ac_circuit.get_electron_anim(8.3)
        )
        # show phase braces with definition
        brace_phase = Brace(
            equation_general.get_part_by_tex("\\phi"),
            direction=UP,
            color=self.phase_color
        )
        brace_phase_def = brace_phase.get_text("Phase") \
            .scale(1.5) \
            .set_color(self.phase_color)
        brace_phase_val = DecimalNumber(
            0,
            color=self.phase_color,
        ) \
            .scale(1.5) \
            .move_to(brace_phase_def.get_center())
        self.play(
            ShowCreation(brace_phase),
            Write(brace_phase_def),
            self.ac_circuit.get_electron_anim(4.65)
        )
        # add graph with negative portion so we can slide left and right
        self.remove(self.graph)
        self.ampl_labels.clear_updaters()
        self.graph = self.time_axes.get_graph(
            np.sin,
            x_max=20,
            x_min=-2.4
        ).set_color(self.current_color)
        # opaque rectangle masking the part of the curve left of the y-axis
        self.block_rect = Rectangle(
            width=2.5,
            height=3,
            stroke_opacity=0,
            fill_color=BLACK,
            fill_opacity=1
        ) \
            .next_to(self.time_axes.center_point, direction=LEFT, buff=0)
        self.add(self.graph, self.block_rect, self.time_axes, self.ampl_labels)
        # convert phase to 0
        self.play(
            Transform(brace_phase_def, brace_phase_val),
            self.ac_circuit.get_electron_anim(3)
        )
        brace_phase_val.add_updater(
            lambda x: x.set_value(self.phase_value.get_value())
        )
        # second call to add for updater
        self.remove(brace_phase_def)
        self.add(brace_phase_val)
        # set phase to different values
        self.play(
            self.get_phase_anim(1)
        )
        self.play(
            self.ac_circuit.get_electron_anim(5.52)
        )
        self.play(
            self.get_phase_anim(-1)
        )
        self.play(
            self.ac_circuit.get_electron_anim(1.65)
        )
        self.play(
            self.get_phase_anim(0)
        )
        self.play(
            self.ac_circuit.get_electron_anim(50)
        )

    def get_sin(self, ampl=1, ang_freq=1, phase=0):
        """Return the callable t -> ampl * sin(ang_freq*t + phase)."""
        return lambda t: ampl * np.sin(ang_freq*t + phase)

    def get_phase_anim(self, new_phase, run_time=1.):
        """Return an AnimationGroup sliding the graph to phase ``new_phase``.

        NOTE(review): run_time is only applied to the electron animation;
        every ApplyMethod below hard-codes run_time=1.  All current callers
        pass the default, so the two agree -- confirm before changing.
        The 0.001*UP there_and_back shifts are visually no-op nudges,
        presumably included to keep those mobjects refreshing during the
        animation -- confirm.
        """
        del_phase = new_phase - self.phase_value.get_value()
        return AnimationGroup(
            ApplyMethod(
                self.graph.shift, -1*del_phase*RIGHT,
                run_time=1
            ),
            ApplyMethod(
                self.phase_value.set_value, new_phase,
                run_time=1
            ),
            ApplyMethod(
                self.block_rect.shift, 0.001*UP,
                rate_func=there_and_back,
                run_time=1
            ),
            ApplyMethod(
                self.ampl_labels.shift, 0.001*UP,
                rate_func=there_and_back,
                run_time=1
            ),
            ApplyMethod(
                self.time_axes.shift, 0.001 * UP,
                rate_func=there_and_back,
                run_time=1
            ),
            self.ac_circuit.get_electron_anim(run_time=run_time)
        )

    def get_freq_anim(self, new_freq, run_time=1.):
        """Return an AnimationGroup stretching the graph horizontally so its
        angular frequency becomes ``new_freq`` (about the axes' origin x).

        NOTE(review): the return value of set_electron_freq_anim is
        discarded; presumably it updates the circuit's electron speed in
        place -- confirm in accalib.electrical_circuits.
        """
        self.ac_circuit.set_electron_freq_anim(new_freq * self.base_electron_freq, run_time=run_time)
        cur_freq = self.freq_value.get_value()
        # scale x by cur_freq/new_freq ...
        transform_matrix = np.array(
            [[cur_freq/new_freq, 0, 0],
             [0, 1, 0],
             [0, 0, 1]]
        )
        # ... about the axes' center x (offset keeps that point fixed)
        offset = np.array(
            [(1-cur_freq/new_freq)*self.time_axes.center_point[0],
             0,
             0]
        )
        def fun(p):
            return offset + transform_matrix.dot(p)
        return AnimationGroup(
            ApplyPointwiseFunction(
                fun, self.graph,
                run_time=run_time
            ),
            self.ac_circuit.get_electron_anim(run_time=run_time),
            ApplyMethod(
                self.freq_value.set_value, new_freq,
                run_time=run_time
            ),
            lag_ratio=0
        )

    def get_amplitude_anim(self, new_ampl, run_time=1.):
        """Return an AnimationGroup scaling the graph vertically (about the
        axes' center point) so its amplitude becomes ``new_ampl``.

        NOTE(review): as in get_freq_anim, the return value of
        set_electron_amplitude_anim is discarded -- confirm it mutates the
        circuit in place.
        """
        self.ac_circuit.set_electron_amplitude_anim(new_ampl*self.base_electron_amplitude, run_time=run_time)
        transform_matrix = np.array(
            [[1, 0, 0],
             [0, new_ampl/self.amplitude_value.get_value(), 0],
             [0, 0, 1]]
        )
        def fun(p):
            return self.time_axes.center_point + transform_matrix.dot(p-self.time_axes.center_point)
        return AnimationGroup(
            ApplyPointwiseFunction(
                fun, self.graph,
                run_time=run_time
            ),
            ApplyMethod(
                self.amplitude_value.set_value, new_ampl,
                run_time=run_time
            ),
            self.ac_circuit.get_electron_anim(run_time),
            lag_ratio=0)
class EulersFormulaIntro(Scene):
    """Writes Euler's identity, then morphs it into Euler's formula with
    the cosine term green, the sine term red, every theta yellow and each
    j purple; the 'Im{ }' wrapper is kept invisible for now."""

    CONFIG = {
        "x_color": GREEN_D,
        "y_color": RED_D,
    }

    def construct(self):
        identity = TexMobject(
            "e^{j \\pi} = -1",
            tex_to_color_map={"j": PURPLE, "\\pi": YELLOW},
        ).scale(4)
        self.play(Write(identity))
        self.wait(14.91)

        formula = TexMobject(
            "Im \\{ e^{j \\theta} \\}", " = ", "cos(", "\\theta", ")",
            " + j ", "sin(", "\\theta", ")",
        ).scale(2).move_to(2.7 * RIGHT + DOWN)
        # Color the top-level pieces: cosine parens green, sine parens red,
        # both standalone thetas yellow.
        for index, color in (
            (2, self.x_color),
            (4, self.x_color),
            (6, self.y_color),
            (8, self.y_color),
            (3, YELLOW),
            (-2, YELLOW),
        ):
            formula[index].set_color(color)
        # Individual glyphs inside the composite pieces: the two j's and
        # the theta in the exponent.
        formula[0][4].set_color(PURPLE)
        formula[5][1].set_color(PURPLE)
        formula[0][5].set_color(YELLOW)
        # Hide the "Im{ ... }" wrapper characters for this scene.
        hidden_wrapper = VGroup(*formula[0][:3], formula[0][-1])
        hidden_wrapper.set_opacity(0)

        self.play(Transform(identity, formula))
        self.wait(10)
class VideoRecommendEulerIdentity(Scene):
    """Shows a 16:9 placeholder frame with the title of a recommended
    3Blue1Brown video written above it."""

    def construct(self):
        frame_height = 7
        # 16:9 rectangle standing in for the embedded video.
        video_frame = Rectangle(
            height=frame_height,
            width=(16 / 9) * frame_height,
        )
        video_frame.to_edge(DOWN, buff=1)
        self.add(video_frame)

        caption = TextMobject(
            "Understanding e to the i pi in 3.14 minutes | DE5 - 3Blue1Brown"
        )
        caption.scale(1.2)
        caption.next_to(video_frame, direction=UP)
        self.play(Write(caption))
        self.wait(10)
class EulersFormula(SineWaveCharacteristics):
CONFIG = {
"y_draw_length": 4*PI,
"num_period_labels": 4,
"imag_axes_config": {
"number_line_config": {
"include_tip": True,
},
"x_axis_config": {
"color": BLUE_C,
"tick_frequency": 10
},
"y_axis_config": {
"color": BLUE_C,
"tick_frequency": 10
},
"x_min": -2.1,
"x_max": 2.1,
"y_min": -2.1,
"y_max": 2.1,
"center_point": FRAME_WIDTH*0.25*LEFT + FRAME_HEIGHT*0.25*UP + 0.3*UP + 1.7*LEFT,
},
"sin_axes_config": {
"number_line_config": {
"include_tip": True,
},
"x_axis_config": {
"color": BLUE_C,
"tick_frequency": 1
},
"y_axis_config": {
"color": BLUE_C,
"tick_frequency": 1
},
"x_min": -0.2,
"x_max": 10,
"y_min": -2.1,
"y_max": 2.1,
"center_point": FRAME_WIDTH * 0.25 * LEFT + FRAME_HEIGHT * 0.25 * UP + 0.3 * UP + 2*RIGHT,
},
"cos_axes_config": {
"number_line_config": {
"include_tip": True,
},
"x_axis_config": {
"color": BLUE_C,
"tick_frequency": 1
},
"y_axis_config": {
"color": BLUE_C,
"tick_frequency": 1
},
"x_min": -0.2,
"x_max": 7,
"y_min": -2.1,
"y_max": 2.1,
"center_point": FRAME_WIDTH * 0.25 * LEFT + FRAME_HEIGHT * 0.25 * UP + 2.2 * DOWN + 1.7*LEFT,
},
"x_color": GREEN_D,
"y_color": RED_D,
"show_x_graph": False,
"show_y_graph": False,
# "amplitude_color": ORANGE,
# "ang_freq_color": BLUE_C,
# "phase_color": RED_B,
}
    def construct(self):
        """Drive the full Euler's-formula animation: show e^{j theta} on the
        complex plane, trace its cos/sin projections, reduce to the imaginary
        part, generalize to A*sin(omega*t + phi), then examine the argument
        theta = omega*t + phi, label periods and change the frequency.

        The fractional wait/draw durations (1.65, 5.91, 13.17, ...) are
        timing cues, presumably synced to a narration track -- TODO confirm.
        """
        self.add(
            Rectangle(
                width=FRAME_WIDTH,
                height=FRAME_HEIGHT,
                color=PURPLE
            )
        )
        # add eulers formula
        eulers_formula = TexMobject(
            "Im \\{ e^{j \\theta} \\}", " = ", "cos(", "\\theta", ")"," + j ", "sin(", "\\theta", ")",
            # tex_to_color_map={
            #     "j": PURPLE,
            #     "\\theta": YELLOW
            # }
        ) \
            .scale(2)\
            .move_to(2.5*RIGHT + 1*DOWN)
        eulers_formula[2].set_color(self.x_color)
        eulers_formula[4].set_color(self.x_color)
        eulers_formula[6].set_color(self.y_color)
        eulers_formula[8].set_color(self.y_color)
        eulers_formula[0][4].set_color(PURPLE)
        eulers_formula[5][1].set_color(PURPLE)
        eulers_formula[0][5].set_color(YELLOW)
        eulers_formula[3].set_color(YELLOW)
        eulers_formula[-2].set_color(YELLOW)
        # the "Im{ ... }" wrapper starts invisible; revealed later when the
        # formula is reduced to its imaginary part
        eq_imag_label = VGroup(
            *eulers_formula[0][:3],
            eulers_formula[0][-1]
        )
        eq_imag_label.set_opacity(0)
        self.add(eulers_formula)
        self.wait(3.48)
        self.create_circle_mobs()
        ejt_rect = SurroundingRectangle(
            VGroup(*eulers_formula[0][3:6]),
            fill_color=YELLOW,
            fill_opacity=0.5
        )
        self.play(
            self.get_drawing_anims(1.65),
        )
        self.play(
            FadeInFrom(ejt_rect, direction=UP),
            self.get_drawing_anims(2*PI - 1.65),
        )
        # phase must be a multiple of 2*PI at this point
        self.time_value.set_value(0)
        self.wait(1)
        self.play(
            FadeOut(ejt_rect)
        )
        self.show_x_graph = True
        self.show_y_graph = True
        self.setup_cos_mobs()
        self.cos_graph = self.cos_axes.get_graph(
            np.cos,
            color=self.x_color,
            x_min=0,
            x_max=self.y_draw_length
        )
        # add sin axes
        self.setup_sine_mobs()
        self.sin_graph = self.sin_axes.get_graph(
            np.sin,
            color=self.y_color,
            x_min=0,
            x_max=4*PI
        )
        self.add(self.sin_axes)
        cos_rects = VGroup(
            SurroundingRectangle(
                VGroup(*eulers_formula[2:5]),
                color=self.x_color,
                fill_color=self.x_color,
                fill_opacity=0.5
            ),
            SurroundingRectangle(
                VGroup(self.cos_label),
                color=self.x_color,
                fill_color=self.x_color,
                fill_opacity=0.5
            ),
        )
        sin_rects = VGroup(
            SurroundingRectangle(
                VGroup(*eulers_formula[6:]),
                color=self.y_color,
                fill_color=self.y_color,
                fill_opacity=0.5
            ),
            SurroundingRectangle(
                VGroup(self.sin_label),
                color=self.y_color,
                fill_color=self.y_color,
                fill_opacity=0.5
            ),
        )
        # draw graphs
        self.play(
            self.get_drawing_anims(self.y_draw_length),
            ShowCreation(
                self.cos_graph,
                run_time=self.y_draw_length,
                rate_func=linear
            ),
            ShowCreation(
                self.sin_graph,
                run_time=self.y_draw_length,
                rate_func=linear
            ),
            AnimationGroup(
                # off-screen dots act as timing spacers within the lagged group
                FadeIn(Dot().shift(100 * UR), run_time=1.26),
                FadeIn(cos_rects),
                FadeIn(Dot().shift(100*UR), run_time=3),
                FadeIn(sin_rects),
                lag_ratio=1
            )
        )
        self.play(
            FadeOut(VGroup(sin_rects, cos_rects)),
            self.get_drawing_anims(5.91),
        )
        # replace with extended sine wave
        self.remove(self.sin_graph)
        self.sin_graph = self.sin_axes.get_graph(
            np.sin,
            color=self.y_color,
            x_min=0,
            x_max=30
        )
        self.add(self.sin_graph)
        # only show imaginary part
        self.show_x_graph = False
        self.play(
            AnimationGroup(
                AnimationGroup(
                    FadeOut(VGroup(*eulers_formula[2:6])),
                    ApplyMethod(eq_imag_label.set_opacity, 1),
                    lag_ratio=0
                ),
                AnimationGroup(
                    # ApplyMethod(VGroup(*eulers_formula[:2]).shift, 2 * RIGHT),
                    ApplyMethod(VGroup(*eulers_formula[6:]).shift, 4 * LEFT),
                    lag_ratio=0
                ),
                lag_ratio=1
            ),
            FadeOut(self.cos_axes),
            FadeOut(self.time_label2),
            FadeOut(self.cos_label),
            FadeOut(self.cos_dot),
            FadeOut(self.x_draw_line),
            FadeOut(self.cos_graph),
            self.get_drawing_anims(2)
        )
        self.play(
            self.get_drawing_anims(13.17),
        )
        cur_eulers_formula = VGroup(
            *eulers_formula[:2],
            *eulers_formula[6:]
        )
        # generalized form with amplitude, angular frequency and phase
        general_eulers_formula = TexMobject(
            "Im \\{ A e^{j (\\omega t + \\phi)} \\}", " = ", "A", "sin(", "\\omega", "t", "+", "\\phi", ")",
        ) \
            .scale(2)\
            .next_to(cur_eulers_formula, direction=DOWN, buff=0.3)
        VGroup(
            general_eulers_formula[0][3],
            general_eulers_formula[2]
        ).set_color(self.amplitude_color)
        VGroup(
            general_eulers_formula[0][7],
            general_eulers_formula[4]
        ).set_color(self.ang_freq_color)
        VGroup(
            general_eulers_formula[0][10],
            general_eulers_formula[7]
        ).set_color(self.phase_color)
        general_eulers_formula[0][5].set_color(PURPLE)
        VGroup(
            general_eulers_formula[3],
            general_eulers_formula[8]
        ).set_color(self.y_color)
        new_sin_label = TexMobject(
            "A", "sin(", "\\omega", "t", "+", "\\phi", ")",
            tex_to_color_map={
                "A": self.amplitude_color,
                "sin(": self.y_color,
                "\\omega": self.ang_freq_color,
                "\\phi": self.phase_color,
                ")": self.y_color
            }
        ) \
            .next_to(self.sin_axes.y_axis.get_top() + 0.4 * DOWN, direction=RIGHT)
        self.time_label_t = TexMobject("time(t)", color=WHITE) \
            .next_to(self.sin_axes.x_axis.get_right(), direction=DOWN) \
            .shift(0.2*LEFT)
        self.play(
            TransformFromCopy(
                cur_eulers_formula, general_eulers_formula
            ),
            Transform(
                self.sin_label, new_sin_label
            ),
            Transform(
                self.time_label1, self.time_label_t
            ),
            self.get_drawing_anims(4.96)
        )
        # indicate time axis label
        kw_arg = {
            "fill_color": YELLOW,
            "fill_opacity": 0.5
        }
        kw_theta_arg = {
            "fill_color": GREEN_D,
            "fill_opacity": 0.5,
            "stroke_color": GREEN_D
        }
        t_ax_rect = SurroundingRectangle(self.time_label_t, **kw_arg)
        t_gen_rects = VGroup(
            SurroundingRectangle(general_eulers_formula[0][8], **kw_arg),
            SurroundingRectangle(general_eulers_formula[-4], **kw_arg),
        )
        theta_rects = VGroup(
            SurroundingRectangle(eulers_formula[0][5], **kw_theta_arg),
            SurroundingRectangle(eulers_formula[-2], **kw_theta_arg),
        )
        self.play(
            ShowCreation(t_ax_rect),
            self.get_drawing_anims(2.35)
        )
        self.play(
            TransformFromCopy(
                t_ax_rect, t_gen_rects
            ),
            self.get_drawing_anims(1.43)
        )
        self.play(
            FadeIn(
                theta_rects
            ),
            self.get_drawing_anims(2)
        )
        self.play(
            FadeOut(t_ax_rect),
            FadeOut(t_gen_rects),
            FadeOut(theta_rects),
            self.get_drawing_anims(9.86)
        )
        # show rectangles around arguments
        arg_rects = VGroup(
            SurroundingRectangle(VGroup(*general_eulers_formula[0][7:11]), **kw_arg),
            SurroundingRectangle(VGroup(*general_eulers_formula[4:8]), **kw_arg),
            SurroundingRectangle(VGroup(*eulers_formula[0][5]), **kw_arg),
            SurroundingRectangle(eulers_formula[-2], **kw_arg),
        )
        self.play(
            FadeIn(arg_rects),
            self.get_drawing_anims(4.7)
        )
        # show argument formula
        argument_formula = TexMobject(
            "\"Argument\" = \\theta = \\omega", " t + \\phi",
            tex_to_color_map={
                "\"Argument\"": YELLOW,
                "\\theta": YELLOW,
                "\\omega": self.ang_freq_color,
                "\\phi": self.phase_color
            }
        )\
            .scale(2)\
            .next_to(general_eulers_formula, direction=DOWN, buff=0.3)\
            .shift(1.8*LEFT)
        theta_rect = SurroundingRectangle(
            VGroup(self.theta_value, self.theta_label),
            **kw_arg
        )
        self.play(
            Write(argument_formula),
            self.get_drawing_anims(3.22)
        )
        self.play(
            TransformFromCopy(arg_rects, theta_rect),
            self.get_drawing_anims(2)
        )
        # set angle to a multiple of 2*PI
        # ** will not work when ang_freq != 1
        cur_ang = self.get_ang()
        del_ang = 2*PI*math.ceil(cur_ang/(2*PI))-cur_ang
        self.play(
            self.get_drawing_anims(del_ang+0.00001)
        )
        self.time_value.set_value(0)
        # remove argument rectangles
        self.play(
            FadeOut(arg_rects),
            FadeOut(theta_rect)
        )
        # focus on argument
        self.play(
            FadeOutAndShift(
                cur_eulers_formula, direction=UP
            ),
            ApplyMethod(
                general_eulers_formula.shift, 2*UP
            ),
            FadeOutAndShift(
                VGroup(*argument_formula[:2]), direction=UL
            ),
            ApplyMethod(
                VGroup(*argument_formula[2:]).shift, 2*LEFT + 2*UP
            ),
        )
        self.wait(2.61)
        # remove phase (dim it rather than delete, so it can come back)
        phase_mobs = VGroup(
            argument_formula[-1],
            argument_formula[-2][1],
            *general_eulers_formula[0][9:11],
            *general_eulers_formula[6:8]
        )
        self.play(
            ApplyMethod(
                phase_mobs.set_opacity, 0.15
            )
        )
        # add brace for time
        time_brace = Brace(
            argument_formula[5][0],
            direction=DOWN,
            buff=0.1
        )
        time_brace_width = time_brace.get_width()
        time_brace.set_width(1.1 * time_brace_width, stretch=True)
        time_text = DecimalNumber(
            0,
            num_decimal_places=1
        )\
            .scale(1.65)\
            .next_to(time_brace, direction=DOWN, buff=0.1)\
            .shift(0.25*LEFT)
        self.play(
            ShowCreation(time_brace),
        )
        self.wait(1.48)
        self.play(
            FadeInFrom(time_text, direction=DOWN),
        )
        # live readout of the elapsed time tracker
        time_text.add_updater(lambda x: x.set_value(self.time_value.get_value()))
        # note that theta is equal to zero
        new_equals = TexMobject("=", color=YELLOW)\
            .scale(2) \
            .next_to(argument_formula[2], direction=LEFT, buff=0.3)
        theta_label2 = DecimalNumber(
            0,
            num_decimal_places=1,
            unit="\\pi",
            edge_to_fix=RIGHT,
            color=YELLOW
        )\
            .scale(2)\
            .next_to(new_equals, direction=LEFT, buff=0.3)
        self.play(
            FadeIn(theta_label2),
            FadeIn(new_equals),
        )
        self.wait(2.61)
        # label complete rotations
        multiples_two_pi = TexMobject(
            "\\text{cycle starts at} \\hspace{1.5mm} \\theta = ",
            *[str(2*i) + "\\pi " for i in range(10)],
            color=YELLOW,
        )
        multiples_two_pi.scale(2)
        multiples_two_pi.arrange(RIGHT, buff=1)\
            .to_edge(LEFT)\
            .shift(4.3*DOWN + 0.5*LEFT)
        self.play(
            FadeIn(multiples_two_pi[0])
        )
        self.play(
            TransformFromCopy(
                theta_label2, multiples_two_pi[1]
            )
        )
        # add updater for second theta label (shows theta in units of pi)
        theta_label2.add_updater(
            lambda x: x.set_value(self.get_ang() / PI)
        )
        # complete one rotation
        self.play(
            self.get_drawing_anims(2*PI)
        )
        self.wait(2.22)
        # copy 2 pi
        self.play(
            Transform(
                theta_label2.copy().clear_updaters(), multiples_two_pi[2]
            )
        )
        self.wait()
        for i in range(3, 5):
            # complete one rotation
            self.play(
                self.get_drawing_anims(2 * PI)
            )
            # copy 2*i*pi
            self.play(
                Transform(
                    theta_label2.copy().clear_updaters(), multiples_two_pi[i]
                )
            )
        # add period labels
        self.period_lines, self.period_labels = self.get_period_labels()
        self.play(
            FadeIn(
                self.period_labels,
                self.period_lines
            )
        )
        # period_label.add_updater(
        #     lambda x: x.set_value((2*PI)/self.freq_value.get_value())
        # )
        self.time_value.set_value(0)
        # change frequency of wave
        self.play(
            self.get_freq_anim(new_freq=2,run_time=2),
        )
        self.play(
            self.get_drawing_anims(2*PI)
        )
        self.wait()
def get_drawing_anims(self, run_time=1.):
anims = [
ApplyMethod(
self.time_value.increment_value, run_time*self.freq_value.get_value(),
rate_func=linear,
run_time=run_time
),
UpdateFromFunc(
self.dot, self.dot_update,
rate_func=linear,
run_time=run_time
),
UpdateFromFunc(
self.ang_line, self.ang_line_update,
rate_func=linear,
run_time=run_time
),
]
if self.show_y_graph:
anims += [
UpdateFromFunc(
self.y_draw_line, self.y_draw_line_update,
rate_func=linear,
run_time=run_time
),
UpdateFromFunc(
self.sin_dot, self.sin_dot_update,
rate_func=linear,
run_time=run_time
)
]
if self.show_x_graph:
anims += [
UpdateFromFunc(
self.x_draw_line, self.x_draw_line_update,
rate_func=linear,
run_time=run_time
),
UpdateFromFunc(
self.cos_dot, self.cos_dot_update,
rate_func=linear,
run_time=run_time
)
]
return AnimationGroup(*anims)
def get_period_labels(self):
period_lines = VGroup()
period_labels = VGroup()
for k in range(1,self.num_period_labels+1):
num = ""
if k != 1:
num = str(k)
period_lines.add(
DashedLine(
self.sin_axes.center_point + 2 * PI * k * RIGHT + 1.2 * UP,
self.sin_axes.center_point + 2 * PI * k *RIGHT + 1.2 * DOWN,
color=YELLOW
)
)
period_labels.add(
TexMobject(
num + "T",
color=YELLOW
)
.next_to(period_lines[k-1].get_top(), direction=RIGHT)
)
return period_lines, period_labels
def ang_line_update(self, l):
x = self.ampl_value.get_value() * np.cos(self.get_ang())
y = self.ampl_value.get_value() * np.sin(self.get_ang()) + 0.0000001
l.put_start_and_end_on(
self.real_imag_axes.center_point,
self.real_imag_axes.center_point + x * RIGHT + y * UP
)
def y_draw_line_update(self, l):
x = self.ampl_value.get_value() * np.cos(self.get_ang())
y = self.ampl_value.get_value() * np.sin(self.get_ang()) + 0.0000001
l.put_start_and_end_on(
self.real_imag_axes.center_point + x * RIGHT + y * UP,
self.sin_axes.center_point + (self.time_value.get_value() % self.y_draw_length)*RIGHT + y*UP
)
def x_draw_line_update(self, l):
x = self.ampl_value.get_value() * np.cos(self.get_ang())
y = self.ampl_value.get_value() * np.sin(self.get_ang()) + 0.0000001
l.put_start_and_end_on(
self.real_imag_axes.center_point + x * RIGHT + y * UP,
self.cos_axes.center_point + (self.time_value.get_value() % (2*PI))*DOWN + x*RIGHT
)
def x_line_update(self, l):
x = self.ampl_value.get_value() * np.cos(self.get_ang())
y = self.ampl_value.get_value() * np.sin(self.get_ang()) + 0.0000001
l.put_start_and_end_on(
self.real_imag_axes.center_point + y * UP,
self.real_imag_axes.center_point + x * RIGHT + y * UP
)
def y_line_update(self, l):
x = self.ampl_value.get_value()*np.cos(self.get_ang())
y = self.ampl_value.get_value()*np.sin(self.get_ang()) + 0.0000001
l.put_start_and_end_on(
self.real_imag_axes.center_point + x*RIGHT,
self.real_imag_axes.center_point + x*RIGHT + y*UP
)
def dot_update(self, d):
x = self.ampl_value.get_value() * np.cos(self.get_ang())
y = self.ampl_value.get_value() * np.sin(self.get_ang()) + 0.0000001
d.move_to(self.real_imag_axes.center_point +
x*RIGHT + y*UP )
def sin_dot_update(self, d):
y = self.ampl_value.get_value() * np.sin(self.get_ang()) + 0.0000001
d.move_to(self.sin_axes.center_point + (self.time_value.get_value() % self.y_draw_length)*RIGHT + y*UP)
def cos_dot_update(self, d):
x = self.ampl_value.get_value() * np.cos(self.get_ang())
d.move_to(self.cos_axes.center_point + (self.time_value.get_value() % (2*PI))*DOWN + x*RIGHT)
def get_ang(self):
return self.phase_value.get_value() + self.time_value.get_value() * self.freq_value.get_value()
    def create_circle_mobs(self):
        """Create the complex-plane axes, unit circle, rotating dot, radius
        line, angle arc and theta readout, then fade them all in.

        Also initializes the phase/freq/amplitude/time ValueTrackers that
        drive every updater in this scene.
        """
        self.phase_value = ValueTracker(0)
        self.freq_value = ValueTracker(1)
        self.ampl_value = ValueTracker(1)
        self.time_value = ValueTracker(0)
        # add real - imag axes
        self.real_imag_axes = Axes(**self.imag_axes_config)
        # manually adding tips to left and bottom
        self.real_imag_axes.x_axis.add_tip(at_start=True)
        self.real_imag_axes.y_axis.add_tip(at_start=True)
        # manually remove tick marks
        self.real_imag_axes.x_axis.big_tick_marks.set_opacity(0)
        self.real_imag_axes.y_axis.big_tick_marks.set_opacity(0)
        self.dot = Dot(color=YELLOW) \
            .move_to(self.real_imag_axes.center_point + RIGHT)
        # self.y_line = Line(color=self.y_color)
        # self.x_line = Line(color=self.x_color)
        self.circle = Circle(
            width=2,
            color=PINK,
            stroke_opacity=0.5
        )\
            .move_to(self.real_imag_axes.center_point)
        self.ang_line = Line(
            self.real_imag_axes.center_point,
            self.real_imag_axes.center_point + RIGHT,
            color=YELLOW
        )
        self.ang_arc = Arc(
            arc_center=self.real_imag_axes.center_point,
            radius=0.3,
            color=YELLOW
        )
        # redraw the arc every frame so it spans 0 .. theta mod 2*pi
        self.ang_arc.add_updater(
            lambda m: m.become(
                Arc(
                    radius=0.3,
                    start_angle=0,
                    arc_center=self.real_imag_axes.center_point,
                    angle=self.get_ang() % (2*PI),
                    color=YELLOW
                )
            )
        )
        imag_label = TexMobject(
            "Imag",
            color=self.y_color
        )\
            .scale(0.8)\
            .next_to(self.real_imag_axes.y_axis.get_top()+0.3*DOWN, direction=RIGHT)
        self.add(imag_label)
        real_label = TexMobject(
            "Real",
            color=self.x_color
        ) \
            .scale(0.8) \
            .next_to(self.real_imag_axes.x_axis.get_right(), direction=UP)\
            .shift(0.5*RIGHT)
        self.theta_label = TexMobject("\\theta = ", color=YELLOW) \
            .scale(0.9) \
            .move_to(self.real_imag_axes.center_point + 0.5*UP + 0.5*RIGHT)
        self.theta_value = DecimalNumber(
            0,
            num_decimal_places=1,
            unit="\\pi",
            edge_to_fix=LEFT,
            color=YELLOW
        )\
            .scale(0.9)\
            .next_to(self.theta_label, direction=RIGHT, buff=0.1)
        self.play(
            FadeIn(self.real_imag_axes),
            FadeIn(self.dot),
            FadeIn(self.circle),
            FadeIn(self.ang_line),
            FadeIn(self.ang_arc),
            FadeIn(imag_label),
            FadeIn(real_label),
            FadeIn(self.theta_label),
            FadeIn(self.theta_value)
        )
        # display theta in units of pi, wrapped into [0, 2)
        self.theta_value.add_updater(
            lambda x: x.set_value((self.get_ang()/PI)%2)
        )
def setup_cos_mobs(self):
# add cos axes
self.cos_axes = Axes(**self.cos_axes_config)
self.cos_axes.y_axis.add_tip(at_start=True)
self.cos_axes.y_axis.big_tick_marks.set_opacity(0)
self.cos_axes.y_axis.tick_marks.set_opacity(0)
self.cos_axes.rotate(-PI / 2, about_point=self.cos_axes.center_point)
self.time_label2 = TexMobject("\\theta", color=YELLOW) \
.next_to(self.cos_axes.x_axis.get_bottom() + 2.25 * UP, direction=LEFT)
self.add()
self.cos_label = TexMobject(
"cos(", "\\theta", ")",
color=self.x_color,
tex_to_color_map={
"\\theta": YELLOW
}
) \
.next_to(self.cos_axes.y_axis.get_right(), direction=DOWN, buff=0.2)
self.cos_dot = Dot(color=YELLOW)\
.move_to(self.cos_axes.center_point + RIGHT)
self.x_draw_line = DashedLine(
self.real_imag_axes.center_point + RIGHT,
self.cos_axes.center_point + RIGHT,
color=self.x_color,
stroke_opacity=0.6
)
self.play(
FadeIn(self.cos_axes),
FadeIn(self.time_label2),
FadeIn(self.cos_label),
FadeIn(self.cos_dot),
FadeIn(self.x_draw_line)
)
    def setup_sine_mobs(self):
        """Create and fade in the horizontal sine-trace axes, the sin(theta)
        label, the theta axis label, the tracking dot and the dashed
        projection line from the circle.

        Side effects: sets self.sin_axes, self.sin_label, self.time_label1,
        self.sin_dot and self.y_draw_line.
        """
        self.sin_axes = Axes(**self.sin_axes_config)
        self.sin_axes.y_axis.add_tip(at_start=True)
        # hide y-axis tick marks; only the traced curve matters
        self.sin_axes.y_axis.big_tick_marks.set_opacity(0)
        self.sin_axes.y_axis.tick_marks.set_opacity(0)
        self.sin_label = TexMobject(
            "sin(", "\\theta", ")",
            color=self.y_color,
            tex_to_color_map={
                "\\theta": YELLOW
            }
        ) \
            .next_to(self.sin_axes.y_axis.get_top() + 0.4 * DOWN, direction=RIGHT)
        self.time_label1 = TexMobject("\\theta", color=YELLOW) \
            .next_to(self.sin_axes.x_axis.get_right(), direction=DOWN) \
            .shift(0.2*LEFT)
        self.sin_dot = Dot(color=YELLOW)\
            .move_to(self.sin_axes.center_point)
        self.y_draw_line = DashedLine(
            self.real_imag_axes.center_point + RIGHT,
            self.sin_axes.center_point,
            color=self.y_color,
            stroke_opacity=0.6
        )
        self.play(
            FadeIn(self.sin_axes),
            FadeIn(self.sin_label),
            FadeIn(self.time_label1),
            FadeIn(self.sin_dot),
            FadeIn(self.y_draw_line)
        )
    def get_freq_anim(self, new_freq, run_time=1.):
        """Return an AnimationGroup that changes the angular frequency.

        Horizontally rescales the sine graph about the sine-axes origin by
        cur_freq/new_freq (affine map below) and slides each period marker
        and label to its new position.

        NOTE: self.freq_value is updated immediately, before the animation
        plays, so any concurrent get_drawing_anims call already advances at
        the new frequency.
        """
        anims = []
        # stretch sine wave
        cur_freq = self.freq_value.get_value()
        self.freq_value.set_value(new_freq)
        # x-only scaling matrix; combined with `offset` it keeps the point
        # at the sine-axes center fixed
        transform_matrix = np.array(
            [[cur_freq/new_freq, 0, 0],
             [0, 1, 0],
             [0, 0, 1]]
        )
        offset = np.array(
            [(1-cur_freq/new_freq)*self.sin_axes.center_point[0],
             0,
             0]
        )
        def fun(p):
            # affine map: p -> offset + S @ p (x-axis squeeze/stretch)
            return offset + transform_matrix.dot(p)
        anims.append(
            ApplyPointwiseFunction(
                fun, self.sin_graph,
                run_time=run_time,
                rate_func=linear
            )
        )
        # shift all period labels
        new_period = (2*PI)/new_freq
        cur_period = (2*PI)/cur_freq
        for k in range(1,self.num_period_labels + 1):
            anims.append(
                ApplyMethod(
                    VGroup(self.period_labels[k-1],self.period_lines[k-1]).shift, k*(cur_period - new_period)*LEFT,
                    run_time=run_time,
                    rate_func=linear
                )
            )
        return AnimationGroup(*anims)
    def get_amplitude_anim(self, new_ampl, run_time=1.):
        """Return an AnimationGroup that changes the wave amplitude by
        vertically rescaling the traced graph about the time-axes center.

        NOTE(review): this method references self.ac_circuit, self.time_axes,
        self.graph, self.amplitude_value and self.base_electron_amplitude,
        none of which are created anywhere in this class (the rest of the
        scene uses self.ampl_value).  It looks carried over from another
        scene and is likely dead/broken here -- confirm before calling.
        """
        self.ac_circuit.set_electron_amplitude_anim(new_ampl*self.base_electron_amplitude, run_time=run_time)
        # y-only scaling by new_ampl / current amplitude
        transform_matrix = np.array(
            [[1, 0, 0],
             [0, new_ampl/self.amplitude_value.get_value(), 0],
             [0, 0, 1]]
        )
        def fun(p):
            # scale about the time-axes center so the baseline stays put
            return self.time_axes.center_point + transform_matrix.dot(p-self.time_axes.center_point)
        return AnimationGroup(
            ApplyPointwiseFunction(
                fun, self.graph,
                run_time=run_time
            ),
            ApplyMethod(
                self.amplitude_value.set_value, new_ampl,
                run_time=run_time
            ),
            self.ac_circuit.get_electron_anim(run_time),
            lag_ratio=0)
|
from pathlib import Path
import mlflow
import pandas as pd
import pytest
from kedro.extras.datasets.pandas import CSVDataSet
from kedro.extras.datasets.pickle import PickleDataSet
from mlflow.tracking import MlflowClient
from pytest_lazyfixture import lazy_fixture
from kedro_mlflow.io.artifacts import MlflowArtifactDataSet
@pytest.fixture
def tracking_uri(tmp_path):
    # Per-test mlflow tracking folder, isolated via pytest's tmp_path.
    return tmp_path / "mlruns"
@pytest.fixture
def df1():
    # Small numeric frame used as the artifact payload in most tests.
    return pd.DataFrame({"col1": [1, 2, 3], "col2": [4, 5, 6]})
@pytest.fixture
def df2():
    # A second, distinguishable frame (different columns/dtypes than df1).
    return pd.DataFrame({"col3": [7, 8, 9], "col4": ["a", "b", "c"]})
@pytest.mark.parametrize(
    "dataset,extension,data,artifact_path",
    [
        (CSVDataSet, ".csv", lazy_fixture("df1"), None),
        ("pandas.CSVDataSet", ".csv", lazy_fixture("df1"), None),
        (PickleDataSet, ".pkl", lazy_fixture("df1"), None),
        ("pickle.PickleDataSet", ".pkl", lazy_fixture("df1"), None),
        (CSVDataSet, ".csv", lazy_fixture("df1"), "artifact_dir"),
        ("pandas.CSVDataSet", ".csv", lazy_fixture("df1"), "artifact_dir"),
        (PickleDataSet, ".pkl", lazy_fixture("df1"), "artifact_dir"),
        (
            "pickle.PickleDataSet",
            ".pkl",
            lazy_fixture("df1"),
            "artifact_dir",
        ),
    ],
)
def test_mlflow_csv_dataset_save_reload(
    tmp_path, tracking_uri, dataset, extension, data, artifact_path
):
    """Saving through MlflowArtifactDataSet (class or string dataset spec,
    with or without an artifact_path) must upload the file to the run's
    artifact store and keep it reloadable with identical content.
    """
    mlflow.set_tracking_uri(tracking_uri.as_uri())
    mlflow_client = MlflowClient(tracking_uri=tracking_uri.as_uri())
    filepath = (tmp_path / "data").with_suffix(extension)
    mlflow_dataset = MlflowArtifactDataSet(
        artifact_path=artifact_path,
        data_set=dict(type=dataset, filepath=filepath.as_posix()),
    )
    with mlflow.start_run():
        mlflow_dataset.save(data)
        run_id = mlflow.active_run().info.run_id
    # the artifact must be properly uploaded to "mlruns" and reloadable
    run_artifacts = [
        fileinfo.path
        for fileinfo in mlflow_client.list_artifacts(run_id=run_id, path=artifact_path)
    ]
    # artifacts are listed relative to the run root, so prepend artifact_path
    remote_path = (
        filepath.name
        if artifact_path is None
        else (Path(artifact_path) / filepath.name).as_posix()
    )
    assert remote_path in run_artifacts
    assert data.equals(mlflow_dataset.load())
@pytest.mark.parametrize(
    "exists_active_run",
    [(False), (True)],
)
def test_artifact_dataset_save_with_run_id(
    tmp_path, tracking_uri, df1, exists_active_run
):
    """Saving with an explicit run_id must log into that (closed) run
    without creating a new run, and must not disturb any already-active run.
    """
    mlflow.set_tracking_uri(tracking_uri.as_uri())
    mlflow_client = MlflowClient(tracking_uri=tracking_uri.as_uri())
    nb_runs = 0
    # create a first run and get its id
    with mlflow.start_run():
        mlflow.log_param("fake", 2)
        run_id = mlflow.active_run().info.run_id
        nb_runs += 1
    # check behaviour when logging with an already opened run
    if exists_active_run:
        mlflow.start_run()
        active_run_id = mlflow.active_run().info.run_id
        nb_runs += 1
    # then same scenario but the run_id where data is saved is specified
    mlflow_csv_dataset = MlflowArtifactDataSet(
        data_set=dict(type=CSVDataSet, filepath=(tmp_path / "df1.csv").as_posix()),
        run_id=run_id,
    )
    mlflow_csv_dataset.save(df1)
    # same tests as previously, but no new run must have been created
    runs_list = mlflow_client.list_run_infos(experiment_id="0")
    run_artifacts = [
        fileinfo.path for fileinfo in mlflow_client.list_artifacts(run_id=run_id)
    ]
    assert len(runs_list) == nb_runs  # no new run must have been created when saving
    assert (
        mlflow.active_run().info.run_id == active_run_id
        if mlflow.active_run()
        else True
    )  # if a run was opened before saving, it must be reopened
    assert "df1.csv" in run_artifacts  # the file must exist
    assert df1.equals(mlflow_csv_dataset.load())  # and must be loadable
    if exists_active_run:
        mlflow.end_run()
def test_is_versioned_dataset_logged_correctly_in_mlflow(tmp_path, tracking_uri, df1):
    """Check if versioned dataset is logged correctly in MLflow as artifact.

    For versioned datasets just artifacts from current run should be logged:
    the uploaded artifact must be the data file itself, not the versioned
    directory tree Kedro stores locally.
    """
    mlflow.set_tracking_uri(tracking_uri.as_uri())
    mlflow_client = MlflowClient(tracking_uri=tracking_uri.as_uri())
    with mlflow.start_run():
        run_id = mlflow.active_run().info.run_id
        mlflow_csv_dataset = MlflowArtifactDataSet(
            data_set=dict(
                type=CSVDataSet,
                filepath=(tmp_path / "df1.csv").as_posix(),
                versioned=True,
            ),
            # run_id=run_id,
        )
        mlflow_csv_dataset.save(df1)
    run_artifacts = [
        fileinfo.path for fileinfo in mlflow_client.list_artifacts(run_id=run_id)
    ]
    # Check if just one artifact was created in given run.
    assert len(run_artifacts) == 1
    artifact_path = mlflow_client.download_artifacts(
        run_id=run_id, path=run_artifacts[0]
    )
    # Check if saved artifact is file and not folder where versioned datasets are stored.
    assert Path(artifact_path).is_file()
    assert df1.equals(mlflow_csv_dataset.load())  # and must be loadable
def test_artifact_dataset_logging_deactivation(tmp_path, tracking_uri):
    """When _logging_activated is False, save() must not log anything to
    mlflow: the set of known run ids is unchanged by the save call."""
    mlflow_pkl_dataset = MlflowArtifactDataSet(
        data_set=dict(type=PickleDataSet, filepath=(tmp_path / "df1.csv").as_posix())
    )
    mlflow.set_tracking_uri(tracking_uri.as_uri())
    mlflow_client = MlflowClient(tracking_uri=tracking_uri.as_uri())

    mlflow_pkl_dataset._logging_activated = False

    def all_run_ids():
        # Every run id across all experiments currently tracked (experiments
        # are numbered "0", "1", ... by the local file store).
        return {
            run.run_id
            for k in range(len(mlflow_client.list_experiments()))
            for run in mlflow_client.list_run_infos(experiment_id=f"{k}")
        }

    all_runs_id_beginning = all_run_ids()
    mlflow_pkl_dataset.save(2)
    assert all_run_ids() == all_runs_id_beginning
def test_mlflow_artifact_logging_deactivation_is_bool(tmp_path):
    """Assigning a non-boolean to _logging_activated must raise ValueError."""
    mlflow_csv_dataset = MlflowArtifactDataSet(
        data_set=dict(type=CSVDataSet, filepath=(tmp_path / "df1.csv").as_posix())
    )
    with pytest.raises(ValueError, match="_logging_activated must be a boolean"):
        mlflow_csv_dataset._logging_activated = "hello"
def test_artifact_dataset_load_with_run_id(tmp_path, tracking_uri, df1, df2):
    """load() must honour an explicitly set run_id: after the local file has
    been overwritten by a later run, pointing run_id at the first run must
    reload the first dataset from the mlflow artifact store.
    """
    mlflow.set_tracking_uri(tracking_uri.as_uri())
    # define the logger
    mlflow_csv_dataset = MlflowArtifactDataSet(
        data_set=dict(type=CSVDataSet, filepath=(tmp_path / "df.csv").as_posix())
    )
    # create a first run, save a first dataset
    with mlflow.start_run():
        run_id1 = mlflow.active_run().info.run_id
        mlflow_csv_dataset.save(df1)
    # saving a second time will erase the local dataset
    with mlflow.start_run():
        mlflow_csv_dataset.save(df2)
    # if we load the dataset, it will be equal to the second one, using the local filepath
    assert df2.equals(mlflow_csv_dataset.load())
    # update the logger and reload outside of an mlflow run: it should load the dataset of the first run id
    mlflow_csv_dataset.run_id = run_id1
    assert df1.equals(mlflow_csv_dataset.load())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.